--- /dev/null
+/*
+ * Disk Array driver for HP SA 5xxx and 6xxx Controllers
+ * Copyright 2000, 2002 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to Cciss-discuss@lists.sourceforge.net
+ *
+ */
+
+#include <xeno/config.h> /* CONFIG_PROC_FS */
+#include <xeno/module.h>
+#include <xeno/version.h>
+#include <xeno/types.h>
+#include <xeno/pci.h>
+#include <xeno/kernel.h>
+#include <xeno/slab.h>
+#include <xeno/delay.h>
+#include <xeno/major.h>
+//#include <xeno/fs.h>
+#include <xeno/blkpg.h>
+#include <xeno/interrupt.h>
+#include <xeno/timer.h>
+//#include <xeno/proc_fs.h>
+#include <xeno/init.h>
+#include <xeno/hdreg.h>
+#include <xeno/spinlock.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+//#include <xeno/smp_lock.h>
+
+#include <xeno/blk.h>
+#include <xeno/blkdev.h>
+#include <xeno/genhd.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
+#define DRIVER_NAME "HP CISS Driver (v 2.4.50)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,4,50)
+
+/* Embedded module documentation macros - see modules.h */
+MODULE_AUTHOR("Hewlett-Packard Company");
+MODULE_DESCRIPTION("Driver for HP SA5xxx SA6xxx Controllers version 2.4.50");
+MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400 6i");
+MODULE_LICENSE("GPL");
+
+#include "cciss_cmd.h"
+#include "cciss.h"
+//#include <xeno/cciss_ioctl.h>
+
+/* define the PCI info for the cards we can control */
+/* Each entry is { PCI vendor, PCI device, subsystem vendor, subsystem
+ * device, class, class mask, driver_data }.  Subsystem vendor 0x0E11 is
+ * Compaq; the subsystem device ID selects the Smart Array model and is
+ * matched against board_id in the products[] table below.
+ */
+const struct pci_device_id cciss_pci_device_id[] = {
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
+		0x0E11, 0x4070, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
+		0x0E11, 0x4080, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
+		0x0E11, 0x4082, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
+		0x0E11, 0x4083, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409A, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409B, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409C, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409D, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x4091, 0, 0, 0},
+	{0,}	/* terminating NULL entry */
+};
+MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
+
+/* number of entries in the products[] table below */
+#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
+
+/* board_id = Subsystem Device ID & Vendor ID
+ * product = Marketing Name for the board
+ * access = Address of the struct of function pointers
+ * (board_id is the 32-bit value 0xDDDDVVVV: subsystem device in the
+ * high half, subsystem vendor 0x0E11 in the low half.)
+ */
+static struct board_type products[] = {
+	{ 0x40700E11, "Smart Array 5300", &SA5_access},
+	{ 0x40800E11, "Smart Array 5i", &SA5B_access},
+	{ 0x40820E11, "Smart Array 532", &SA5B_access},
+	{ 0x40830E11, "Smart Array 5312", &SA5B_access},
+	{ 0x409A0E11, "Smart Array 641", &SA5_access},
+	{ 0x409B0E11, "Smart Array 642", &SA5_access},
+	{ 0x409C0E11, "Smart Array 6400", &SA5_access},
+	{ 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
+	{ 0x40910E11, "Smart Array 6i", &SA5_access},
+};
+
+/* How long to wait (in milliseconds) for board to go into simple mode */
+#define MAX_CONFIG_WAIT 30000
+#define MAX_IOCTL_CONFIG_WAIT 1000
+
+/* define how many times we will try a command because of bus resets */
+#define MAX_CMD_RETRIES 3
+
+#define READ_AHEAD 128
+#define NR_CMDS 128 /* #commands that can be outstanding */
+#define MAX_CTLR 32
+
+/* No sense in giving up our preallocated major numbers */
+#if MAX_CTLR < 8
+#error"cciss.c: MAX_CTLR must be 8 or greater"
+#endif
+
+/* Originally cciss driver only supports 8 major number */
+#define MAX_CTLR_ORIG COMPAQ_CISS_MAJOR7 - COMPAQ_CISS_MAJOR + 1
+
+#define CCISS_DMA_MASK 0xFFFFFFFFFFFFFFFF /* 64 bit DMA */
+
+#ifdef CONFIG_CISS_MONITOR_THREAD
+static int cciss_monitor(void *ctlr);
+static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd,
+ unsigned long count, int (*cciss_monitor)(void *), int *rc);
+static u32 heartbeat_timer = 0;
+#else
+#define cciss_monitor(x)
+#define kill_monitor_thead(x)
+#endif
+
+static ctlr_info_t *hba[MAX_CTLR];
+static int map_major_to_ctlr[MAX_BLKDEV] = {0}; /* gets ctlr num from maj num */
+//static struct proc_dir_entry *proc_cciss;
+
+static void do_cciss_request(request_queue_t *q);
+static int cciss_open(struct inode *inode, struct file *filep);
+static int cciss_release(struct inode *inode, struct file *filep);
+static int cciss_ioctl(struct inode *inode, struct file *filep,
+ unsigned int cmd, unsigned long arg);
+
+static int revalidate_logvol(kdev_t dev, int maxusage);
+static int frevalidate_logvol(kdev_t dev);
+static int deregister_disk(int ctlr, int logvol);
+static int register_new_disk(int cltr, int opened_vol, __u64 requested_lun);
+static int cciss_rescan_disk(int cltr, int logvol);
+
+static void cciss_getgeometry(int cntl_num);
+
+static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c);
+static void start_io( ctlr_info_t *h);
+
+
+#ifdef CONFIG_PROC_FS
+/* /proc/cciss support; implementations appear further down this file.
+ * (A stray "XXX" placeholder previously sat here and broke the build
+ * whenever CONFIG_PROC_FS was enabled.)
+ */
+static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
+		int length, int *eof, void *data);
+static void cciss_procinit(int i);
+#else
+/* No procfs: provide a no-op stub so callers need no #ifdefs. */
+static void cciss_procinit(int i) {}
+#endif /* CONFIG_PROC_FS */
+
+
+/* Block-device entry points registered for each cciss major.  All data
+ * transfer goes through the request queue (do_cciss_request); these
+ * cover open/close, ioctl and partition revalidation only.
+ */
+static struct block_device_operations cciss_fops = {
+	//owner: THIS_MODULE,
+	open: cciss_open,
+	release: cciss_release,
+	ioctl: cciss_ioctl,
+	revalidate: frevalidate_logvol,
+};
+
+#include "cciss_scsi.c" /* For SCSI tape support */
+
+#define ENG_GIG 1048576000		/* "engineering" gigabyte, in bytes */
+#define ENG_GIG_FACTOR (ENG_GIG/512)	/* 512-byte sectors per ENG_GIG */
+#define RAID_UNKNOWN 6			/* index of "UNKNOWN" in raid_label[] */
+static const char *raid_label[] = {"0","4","1(0+1)","5","5+1","ADG",
+	"UNKNOWN"};
+/*
+ * Report information about this controller.
+ */
+#ifdef CONFIG_PROC_FS
+/*
+ * /proc read handler: emit a controller-wide summary followed by one
+ * line per configured logical volume (size in "engineering" GB plus
+ * RAID level).  Returns -EBUSY if a configuration change is in flight,
+ * otherwise the number of bytes placed in the caller's buffer.
+ */
+static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
+		int length, int *eof, void *data)
+{
+	off_t pos = 0;
+	off_t len = 0;
+	int size, i, ctlr;
+	ctlr_info_t *h = (ctlr_info_t*)data;
+	drive_info_struct *drv;
+	unsigned long flags;
+	unsigned int vol_sz, vol_sz_frac;
+
+	/* Claim busy_configuring so the LUN tables cannot change while
+	 * we walk them; release it before returning. */
+	spin_lock_irqsave(&io_request_lock, flags);
+	if (h->busy_configuring) {
+		spin_unlock_irqrestore(&io_request_lock, flags);
+		return -EBUSY;
+	}
+	h->busy_configuring = 1;
+	spin_unlock_irqrestore(&io_request_lock, flags);
+
+	/* Controller-wide summary first. */
+	ctlr = h->ctlr;
+	size = sprintf(buffer, "%s: HP %s Controller\n"
+		"Board ID: 0x%08lx\n"
+		"Firmware Version: %c%c%c%c\n"
+		"IRQ: %d\n"
+		"Logical drives: %d\n"
+		"Current Q depth: %d\n"
+		"Current # commands on controller: %d\n"
+		"Max Q depth since init: %d\n"
+		"Max # commands on controller since init: %d\n"
+		"Max SG entries since init: %d\n"
+		MONITOR_PERIOD_PATTERN
+		MONITOR_DEADLINE_PATTERN
+		MONITOR_STATUS_PATTERN
+		"\n",
+		h->devname,
+		h->product_name,
+		(unsigned long)h->board_id,
+		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
+		(unsigned int)h->intr,
+		h->num_luns,
+		h->Qdepth, h->commands_outstanding,
+		h->maxQsinceinit, h->max_outstanding, h->maxSG,
+		MONITOR_PERIOD_VALUE(h),
+		MONITOR_DEADLINE_VALUE(h),
+		CTLR_STATUS(h));
+
+	pos += size; len += size;
+	cciss_proc_tape_report(ctlr, buffer, &pos, &len);
+	/* One line per logical volume with a non-zero size. */
+	for(i=0; i<=h->highest_lun; i++) {
+		drv = &h->drv[i];
+		if (drv->nr_blocks == 0)
+			continue;
+		/* whole "engineering" gigabytes, then two decimals */
+		vol_sz = drv->nr_blocks/ENG_GIG_FACTOR;
+		vol_sz_frac = (drv->nr_blocks%ENG_GIG_FACTOR)*100/ENG_GIG_FACTOR;
+
+		/* clamp out-of-range levels onto the "UNKNOWN" label */
+		if (drv->raid_level > 5)
+			drv->raid_level = RAID_UNKNOWN;
+		size = sprintf(buffer+len, "cciss/c%dd%d:"
+			"\t%4d.%02dGB\tRAID %s\n",
+			ctlr, i, vol_sz,vol_sz_frac,
+			raid_label[drv->raid_level]);
+		pos += size, len += size;
+	}
+
+	/* Standard single-shot proc_read bookkeeping. */
+	*eof = 1;
+	*start = buffer+offset;
+	len -= offset;
+	if (len>length)
+		len = length;
+	h->busy_configuring = 0;
+	return len;
+}
+
+/*
+ * Handle writes to /proc/cciss/<devname>.  Recognized commands:
+ * "engage scsi" (when tape support is configured) and whatever
+ * START_MONITOR_THREAD accepts.  Returns count on success, negative
+ * errno otherwise.
+ */
+static int
+cciss_proc_write(struct file *file, const char *buffer,
+			unsigned long count, void *data)
+{
+	unsigned char cmd[80];
+	int len;
+	ctlr_info_t *h = (ctlr_info_t *) data;
+	int rc;
+
+	if (count > sizeof(cmd)-1)	/* command plus NUL must fit */
+		return -EINVAL;
+	if (copy_from_user(cmd, buffer, count))
+		return -EFAULT;
+	cmd[count] = '\0';
+	len = strlen(cmd);
+	/* Strip one trailing newline.  The len > 0 guard keeps an empty
+	 * write (count == 0) from indexing cmd[-1]. */
+	if (len > 0 && cmd[len-1] == '\n')
+		cmd[--len] = '\0';
+
+# ifdef CONFIG_CISS_SCSI_TAPE
+	if (strcmp("engage scsi", cmd)==0) {
+		rc = cciss_engage_scsi(h->ctlr);
+		if (rc != 0)
+			return -rc;
+		return count;
+	}
+	/* might be nice to have "disengage" too, but it's not
+	   safely possible. (only 1 module use count, lock issues.) */
+# endif
+
+	if (START_MONITOR_THREAD(h, cmd, count, cciss_monitor, &rc) == 0)
+		return rc;
+
+	return -EINVAL;
+}
+
+/*
+ * Get us a file in /proc/cciss that says something about each controller.
+ * Create /proc/cciss if it doesn't exist yet.
+ */
+static void __init cciss_procinit(int i)
+{
+	struct proc_dir_entry *pde;
+
+	if (proc_cciss == NULL) {
+		proc_cciss = proc_mkdir("cciss", proc_root_driver);
+		if (!proc_cciss) {
+			printk("cciss: proc_mkdir failed\n");
+			return;
+		}
+	}
+
+	pde = create_proc_read_entry(hba[i]->devname,
+			S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
+			proc_cciss, cciss_proc_get_info, hba[i]);
+	/* create_proc_read_entry can fail (allocation); don't oops on a
+	 * NULL deref -- the proc file is a convenience, not essential. */
+	if (pde == NULL) {
+		printk("cciss: create_proc_read_entry failed\n");
+		return;
+	}
+	pde->write_proc = cciss_proc_write;
+}
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * Allocate a command block.
+ *
+ * For operations that cannot sleep, a command block is taken from a
+ * pool preallocated at init time and tracked with a simple bitmap
+ * (get_from_pool != 0).  For operations that may sleep, pass
+ * get_from_pool == 0 and the block is allocated from the PCI coherent
+ * DMA allocator instead.  cmd_free() MUST be called with got_from_pool
+ * set to the same value that was used here.
+ *
+ * Returns NULL if no command block is available.
+ */
+static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
+{
+	CommandList_struct *c;
+	int i;
+	u64bit temp64;
+	dma_addr_t cmd_dma_handle, err_dma_handle;
+
+	if (!get_from_pool) {
+		/* Dynamic path: one DMA-coherent buffer for the command,
+		 * a second for its error info. */
+		c = (CommandList_struct *) pci_alloc_consistent(
+			h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
+		if (c==NULL)
+			return NULL;
+		memset(c, 0, sizeof(CommandList_struct));
+
+		c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
+			h->pdev, sizeof(ErrorInfo_struct),
+			&err_dma_handle);
+
+		if (c->err_info == NULL)
+		{
+			/* undo the first allocation on failure */
+			pci_free_consistent(h->pdev,
+				sizeof(CommandList_struct), c, cmd_dma_handle);
+			return NULL;
+		}
+		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+	} else /* get it out of the controllers pool */
+	{
+		/* Find and atomically claim a free slot; retry if another
+		 * CPU grabs the same slot between scan and set. */
+		do {
+			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
+			if (i == NR_CMDS)
+				return NULL;
+		} while(test_and_set_bit(i%32, h->cmd_pool_bits+(i/32)) != 0);
+#ifdef CCISS_DEBUG
+		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
+#endif
+		c = h->cmd_pool + i;
+		memset(c, 0, sizeof(CommandList_struct));
+		cmd_dma_handle = h->cmd_pool_dhandle
+			+ i*sizeof(CommandList_struct);
+		c->err_info = h->errinfo_pool + i;
+		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+		err_dma_handle = h->errinfo_pool_dhandle
+			+ i*sizeof(ErrorInfo_struct);
+		h->nr_allocs++;
+	}
+
+	/* Record the bus addresses the controller will use to reach the
+	 * command and its error descriptor. */
+	c->busaddr = (__u32) cmd_dma_handle;
+	temp64.val = (__u64) err_dma_handle;
+	c->ErrDesc.Addr.lower = temp64.val32.lower;
+	c->ErrDesc.Addr.upper = temp64.val32.upper;
+	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
+
+	c->ctlr = h->ctlr;
+	return c;
+
+
+}
+
+/*
+ * Frees a command block that was previously allocated with cmd_alloc().
+ * got_from_pool must match the get_from_pool flag that was used at
+ * allocation time: pool commands go back to the bitmap, dynamically
+ * allocated ones back to the PCI coherent-DMA allocator.
+ */
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
+{
+	u64bit err_handle;
+	int idx;
+
+	if (got_from_pool) {
+		/* Return the slot to the controller's preallocated pool. */
+		idx = c - h->cmd_pool;
+		clear_bit(idx%32, h->cmd_pool_bits+(idx/32));
+		h->nr_frees++;
+		return;
+	}
+
+	/* Dynamically allocated: release the error-info buffer first,
+	 * then the command itself. */
+	err_handle.val32.lower = c->ErrDesc.Addr.lower;
+	err_handle.val32.upper = c->ErrDesc.Addr.upper;
+	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
+		c->err_info, (dma_addr_t) err_handle.val);
+	pci_free_consistent(h->pdev, sizeof(CommandList_struct),
+		c, (dma_addr_t) c->busaddr);
+}
+
+/*
+ * Fill in the per-volume disk information (sector counts and block
+ * sizes) for every configured logical volume on one controller, and
+ * set gendisk.nr_real accordingly.
+ */
+static void cciss_geninit( int ctlr)
+{
+	ctlr_info_t *h = hba[ctlr];
+	int vol, part;
+
+	h->gendisk.nr_real = 0;
+	/* Walk every possible logical volume on this controller. */
+	for (vol = 0; vol < NWD; vol++) {
+		drive_info_struct *drv = &h->drv[vol];
+		int base = vol << NWD_SHIFT;
+
+		if (drv->nr_blocks == 0)
+			continue;
+		h->hd[base].nr_sects = drv->nr_blocks;
+		h->sizes[base] = drv->nr_blocks;
+
+		/* Soft (1K) and hardware block sizes per partition. */
+		for (part = 0; part < MAX_PART; part++) {
+			h->blocksizes[base + part] = 1024;
+			h->hardsizes[base + part] = drv->block_size;
+		}
+	}
+	h->gendisk.nr_real = h->highest_lun + 1;
+}
+/*
+ * Open. Make sure the device is really there.
+ */
+static int cciss_open(struct inode *inode, struct file *filep)
+{
+	int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
+	int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss_open %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
+#endif /* CCISS_DEBUG */
+
+	/* hba[] has exactly MAX_CTLR entries, so ctlr == MAX_CTLR is
+	 * already out of bounds: reject with >=, not >. */
+	if (ctlr >= MAX_CTLR || hba[ctlr] == NULL || !CTLR_IS_ALIVE(hba[ctlr]))
+		return -ENXIO;
+	/*
+	 * Root is allowed to open raw volume zero even if it's not configured
+	 * so array config can still work. Root is also allowed to open any
+	 * volume that has a LUN ID, so it can issue IOCTL to reread the
+	 * disk information. I don't think I really like this.
+	 * but I'm already using way too many device nodes to claim another one
+	 * for "raw controller".
+	 */
+	if (hba[ctlr]->sizes[MINOR(inode->i_rdev)] == 0) { /* not online? */
+		if (MINOR(inode->i_rdev) != 0) { /* not node 0? */
+			/* if not node 0 make sure it is a partition = 0 */
+			if (MINOR(inode->i_rdev) & 0x0f) {
+				return -ENXIO;
+				/* if it is, make sure we have a LUN ID */
+			} else if (hba[ctlr]->drv[MINOR(inode->i_rdev)
+					>> NWD_SHIFT].LunID == 0) {
+				return -ENXIO;
+			}
+		}
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+	}
+
+	/* Track opens per volume and per controller. */
+	hba[ctlr]->drv[dsk].usage_count++;
+	hba[ctlr]->usage_count++;
+	return 0;
+}
+/*
+ * Close. Sync first.
+ */
+static int cciss_release(struct inode *inode, struct file *filep)
+{
+	int major = MAJOR(inode->i_rdev);
+	int ctlr = map_major_to_ctlr[major];
+	int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss_release %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
+#endif /* CCISS_DEBUG */
+
+	/* fsync_dev(inode->i_rdev); */
+
+	/* Drop the per-volume and per-controller open counts taken in
+	 * cciss_open(). */
+	hba[ctlr]->drv[dsk].usage_count--;
+	hba[ctlr]->usage_count--;
+	return 0;
+}
+
+/*
+ * ioctl
+ *
+ * This port does not support any of the cciss ioctls: the HDIO_GETGEO,
+ * BLK* and CCISS_* handlers from the 2.4 Linux driver were carried
+ * along here inside an "#if 0" block, but that code referenced the
+ * commented-out ctlr/dsk locals and so could not even compile if it
+ * were re-enabled; it has been removed as dead code (retrieve it from
+ * the upstream 2.4 cciss driver if a handler is ever ported).  Every
+ * request is logged and rejected with -EBADRQC.
+ */
+static int cciss_ioctl(struct inode *inode, struct file *filep,
+		unsigned int cmd, unsigned long arg)
+{
+	printk(KERN_ALERT "cciss_ioctl: Called BUT NOT SUPPORTED cmd=%x %lx\n", cmd, arg);
+	return -EBADRQC;
+}
+
+/* Borrowed and adapted from sd.c
+ *
+ * Re-read the partition table of one logical volume.  Fails with
+ * -EBUSY if the volume currently has more than maxusage users.
+ */
+static int revalidate_logvol(kdev_t dev, int maxusage)
+{
+	int ctlr, target;
+	struct gendisk *gdev;
+	unsigned long flags;
+	int max_p;
+	int start;
+	int i;
+
+	target = MINOR(dev) >> NWD_SHIFT;	/* logical volume number */
+	ctlr = map_major_to_ctlr[MAJOR(dev)];
+	gdev = &(hba[ctlr]->gendisk);
+
+	/* Refuse if the volume is busier than the caller allows; hold a
+	 * usage reference for the duration of the revalidate. */
+	spin_lock_irqsave(&io_request_lock, flags);
+	if (hba[ctlr]->drv[target].usage_count > maxusage) {
+		spin_unlock_irqrestore(&io_request_lock, flags);
+		printk(KERN_WARNING "cciss: Device busy for "
+			"revalidation (usage=%d)\n",
+			hba[ctlr]->drv[target].usage_count);
+		return -EBUSY;
+	}
+	hba[ctlr]->drv[target].usage_count++;
+	spin_unlock_irqrestore(&io_request_lock, flags);
+
+	max_p = gdev->max_p;
+	start = target << gdev->minor_shift;
+
+	/* Invalidate and zero out every partition of this volume. */
+	for(i=max_p-1; i>=0; i--) {
+		int minor = start+i;
+		invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
+		gdev->part[minor].start_sect = 0;
+		gdev->part[minor].nr_sects = 0;
+
+		/* reset the blocksize so we can read the partition table */
+		blksize_size[hba[ctlr]->major][minor] = 1024;
+	}
+	/* setup partitions per disk */
+	grok_partitions(gdev, target, MAX_PART,
+			hba[ctlr]->drv[target].nr_blocks);
+	hba[ctlr]->drv[target].usage_count--;
+	return 0;
+}
+
+/* Revalidate entry point registered in cciss_fops; delegates to
+ * revalidate_logvol() with maxusage 0. */
+static int frevalidate_logvol(kdev_t dev)
+{
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss: frevalidate has been called\n");
+#endif /* CCISS_DEBUG */
+	return revalidate_logvol(dev, 0);
+}
+/*
+ * Remove one logical volume from the driver's bookkeeping: invalidate
+ * all of its partitions, zero its size/geometry info and, if it was the
+ * highest-numbered volume, recompute highest_lun.  Returns -EBUSY if
+ * the volume is still open or the controller is being reconfigured.
+ */
+static int deregister_disk(int ctlr, int logvol)
+{
+	unsigned long flags;
+	struct gendisk *gdev = &(hba[ctlr]->gendisk);
+	ctlr_info_t *h = hba[ctlr];
+	int start, max_p, i;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	spin_lock_irqsave(&io_request_lock, flags);
+	/* make sure logical volume is NOT in use */
+	if (h->drv[logvol].usage_count > 1 || h->busy_configuring) {
+		spin_unlock_irqrestore(&io_request_lock, flags);
+		return -EBUSY;
+	}
+	h->busy_configuring = 1;
+	spin_unlock_irqrestore(&io_request_lock, flags);
+
+	/* invalidate the devices and deregister the disk */
+	max_p = gdev->max_p;
+	start = logvol << gdev->minor_shift;
+	for (i=max_p-1; i>=0; i--) {
+		int minor = start+i;
+		/* printk("invalidating( %d %d)\n", ctlr, minor); */
+		invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
+		/* so open will now fail */
+		h->sizes[minor] = 0;
+		/* so it will no longer appear in /proc/partitions */
+		gdev->part[minor].start_sect = 0;
+		gdev->part[minor].nr_sects = 0;
+	}
+	/* check to see if it was the last disk */
+	if (logvol == h->highest_lun) {
+		/* if so, find the new highest lun */
+		int i, newhighest =-1;	/* NOTE(review): inner i shadows the outer one */
+		for(i=0; i<h->highest_lun; i++) {
+			/* if the disk has size > 0, it is available */
+			if (h->sizes[i << gdev->minor_shift] != 0)
+				newhighest = i;
+		}
+		h->highest_lun = newhighest;
+
+	}
+	--h->num_luns;
+	gdev->nr_real = h->highest_lun+1;
+	/* zero out the disk size info */
+	h->drv[logvol].nr_blocks = 0;
+	h->drv[logvol].block_size = 0;
+	h->drv[logvol].cylinders = 0;
+	h->drv[logvol].LunID = 0;
+	h->busy_configuring = 0;
+	return 0;
+}
+/*
+ * sendcmd_withirq: send one command to the controller and busy-wait,
+ * with the controller's interrupt enabled, until the completion
+ * interrupt clears the caller's wait flag.  (This port spins instead
+ * of sleeping on a completion - see the #if 0 blocks for the original
+ * Linux scheme.)
+ *
+ * cmd:          CISS opcode (TYPE_CMD) or message code (TYPE_MSG)
+ * ctlr:         index into hba[]
+ * buff/size:    optional data buffer; size 0 / buff NULL = no transfer
+ * use_unit_num: non-zero = address logical volume log_unit
+ * log_unit:     logical drive index when use_unit_num != 0
+ * page_code:    inquiry VPD page code (0 = standard inquiry)
+ * cmdtype:      TYPE_CMD or TYPE_MSG
+ *
+ * Returns IO_OK on success, IO_ERROR on failure, -ENOMEM if no
+ * command block could be allocated.
+ */
+static int sendcmd_withirq(__u8 cmd,
+ int ctlr,
+ void *buff,
+ size_t size,
+ unsigned int use_unit_num,
+ unsigned int log_unit,
+ __u8 page_code,
+ __u8 cmdtype)
+{
+ ctlr_info_t *h = hba[ctlr];
+ CommandList_struct *c;
+ u64bit buff_dma_handle;
+ unsigned long flags;
+ int return_status = IO_OK;
+#if 0
+ DECLARE_COMPLETION(wait);
+#else
+ /* XXX SMH: no waiting for us ... spin instead; the completion
+  * interrupt clears 'wait' via c->waiting when the command is done */
+ int wait = 1;
+ int usecs = 0;
+#endif
+
+ if ((c = cmd_alloc(h, 0)) == NULL)
+  return -ENOMEM;
+ c->cmd_type = CMD_IOCTL_PEND;
+ /* Fill in Command Header */
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ if (buff != NULL) { /* buffer to fill */
+  c->Header.SGList = 1;
+  c->Header.SGTotal= 1;
+ } else {
+  /* no buffers to fill */
+  c->Header.SGList = 0;
+  c->Header.SGTotal= 0;
+ }
+ c->Header.Tag.lower = c->busaddr; /* tag is phys addr of cmd */
+ /* Fill in Request block */
+ c->Request.CDB[0] = cmd;
+ c->Request.Type.Type = cmdtype;
+ if (cmdtype == TYPE_CMD) {
+  switch (cmd) {
+  case CISS_INQUIRY:
+   /* If the logical unit number is 0 then, this is going
+      to controller so It's a physical command
+      mode = 0 target = 0.  So we have nothing to write.
+      Otherwise mode = 1 target = LUNID */
+   if (use_unit_num != 0) {
+    c->Header.LUN.LogDev.VolId =
+     hba[ctlr]->drv[log_unit].LunID;
+    c->Header.LUN.LogDev.Mode = 1;
+   }
+   /* non-zero page_code selects a vital product data page */
+   if (page_code != 0) {
+    c->Request.CDB[1] = 0x01;
+    c->Request.CDB[2] = page_code;
+   }
+   c->Request.CDBLen = 6;
+   c->Request.Type.Attribute = ATTR_SIMPLE;
+   c->Request.Type.Direction = XFER_READ; /* Read */
+   c->Request.Timeout = 0; /* Don't time out */
+   c->Request.CDB[4] = size & 0xFF;
+   break;
+  case CISS_REPORT_LOG:
+   /* Talking to controller so It's a physical command
+      mode = 00 target = 0.  So we have nothing to write. */
+   c->Request.CDBLen = 12;
+   c->Request.Type.Attribute = ATTR_SIMPLE;
+   c->Request.Type.Direction = XFER_READ; /* Read */
+   c->Request.Timeout = 0; /* Don't time out */
+   c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+   c->Request.CDB[7] = (size >> 16) & 0xFF;
+   c->Request.CDB[8] = (size >> 8) & 0xFF;
+   c->Request.CDB[9] = size & 0xFF;
+   break;
+  case CCISS_READ_CAPACITY:
+   c->Header.LUN.LogDev.VolId=
+    hba[ctlr]->drv[log_unit].LunID;
+   c->Header.LUN.LogDev.Mode = 1;
+   c->Request.CDBLen = 10;
+   c->Request.Type.Attribute = ATTR_SIMPLE;
+   c->Request.Type.Direction = XFER_READ; /* Read */
+   c->Request.Timeout = 0; /* Don't time out */
+   break;
+  default:
+   printk(KERN_WARNING
+    "cciss: Unknown Command 0x%x sent attempted\n", cmd);
+   /* c came from cmd_alloc(h, 0), so free it the same
+    * way (the success path below uses flag 0 as well) */
+   cmd_free(h, c, 0);
+   return IO_ERROR;
+  }
+ } else if (cmdtype == TYPE_MSG) {
+  switch (cmd) {
+  case 3: /* No-Op message */
+   c->Request.CDBLen = 1;
+   c->Request.Type.Attribute = ATTR_SIMPLE;
+   c->Request.Type.Direction = XFER_WRITE;
+   c->Request.Timeout = 0;
+   c->Request.CDB[0] = cmd;
+   break;
+  default:
+   printk(KERN_WARNING
+    "cciss%d: unknown message type %d\n",
+    ctlr, cmd);
+   cmd_free(h, c, 0);
+   return IO_ERROR;
+  }
+ } else {
+  printk(KERN_WARNING
+   "cciss%d: unknown command type %d\n", ctlr, cmdtype);
+  cmd_free(h, c, 0);
+  return IO_ERROR;
+ }
+
+ /* Fill in the scatter gather information */
+ if (size > 0) {
+  buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
+    buff, size, PCI_DMA_BIDIRECTIONAL);
+  c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
+  c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
+  c->SG[0].Len = size;
+  c->SG[0].Ext = 0; /* we are not chaining */
+ }
+
+resend_cmd2:
+
+#if 0
+ c->waiting = &wait;
+#else
+ /* XXX SMH: we spin instead of waiting... */
+ c->waiting = (void *)&wait;
+#endif
+
+ /* Put the request on the tail of the queue and send it */
+ spin_lock_irqsave(&io_request_lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ start_io(h);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+
+#if 0
+ wait_for_completion(&wait);
+#else
+ /* XXX SMH: spin instead of waiting on wait queue; warn once a
+  * second while the command is outstanding */
+ while (wait) {
+  do_softirq();
+  udelay(500);
+  usecs += 500;
+  if (usecs > 1000000) {
+   printk("cciss: still waiting...!\n");
+   usecs = 0;
+  }
+ }
+#endif
+
+ if (c->err_info->CommandStatus != 0) {
+  /* an error has occurred */
+  switch (c->err_info->CommandStatus) {
+  case CMD_TARGET_STATUS:
+   printk(KERN_WARNING "cciss: cmd %p has "
+    " completed with errors\n", c);
+   if (c->err_info->ScsiStatus) {
+    printk(KERN_WARNING "cciss: cmd %p "
+     "has SCSI Status = %x\n", c,
+     c->err_info->ScsiStatus);
+   }
+   break;
+  case CMD_DATA_UNDERRUN:
+  case CMD_DATA_OVERRUN:
+   /* expected for inquire and report lun commands */
+   break;
+  case CMD_INVALID:
+   printk(KERN_WARNING "cciss: cmd %p is "
+    "reported invalid\n", c);
+   return_status = IO_ERROR;
+   break;
+  case CMD_PROTOCOL_ERR:
+   printk(KERN_WARNING "cciss: cmd %p has "
+    "protocol error \n", c);
+   return_status = IO_ERROR;
+   break;
+  case CMD_HARDWARE_ERR:
+   printk(KERN_WARNING "cciss: cmd %p had "
+    " hardware error\n", c);
+   return_status = IO_ERROR;
+   break;
+  case CMD_CONNECTION_LOST:
+   printk(KERN_WARNING "cciss: cmd %p had "
+    "connection lost\n", c);
+   return_status = IO_ERROR;
+   break;
+  case CMD_ABORTED:
+   printk(KERN_WARNING "cciss: cmd %p was "
+    "aborted\n", c);
+   return_status = IO_ERROR;
+   break;
+  case CMD_ABORT_FAILED:
+   printk(KERN_WARNING "cciss: cmd %p reports "
+    "abort failed\n", c);
+   return_status = IO_ERROR;
+   break;
+  case CMD_UNSOLICITED_ABORT:
+   printk(KERN_WARNING "cciss: cmd %p aborted "
+    "do to an unsolicited abort\n", c);
+   if (c->retry_count < MAX_CMD_RETRIES) {
+    printk(KERN_WARNING "retrying cmd\n");
+    c->retry_count++;
+    /* erase the old error information */
+    memset(c->err_info, 0,
+     sizeof(ErrorInfo_struct));
+    return_status = IO_OK;
+#if 0
+    INIT_COMPLETION(wait);
+#else
+    /* XXX SMH: re-arm the spin flag (the
+     * equivalent of INIT_COMPLETION) so the
+     * loop above really waits for the resent
+     * command; this used to be set to 0,
+     * which skipped the wait entirely and
+     * freed the command while in flight */
+    wait = 1;
+#endif
+    goto resend_cmd2;
+   }
+   return_status = IO_ERROR;
+   break;
+  default:
+   printk(KERN_WARNING "cciss: cmd %p returned "
+    "unknown status %x\n", c,
+    c->err_info->CommandStatus);
+   return_status = IO_ERROR;
+  }
+ }
+
+ /* unlock the buffers from DMA - only if we mapped some above;
+  * with size == 0 buff_dma_handle was never initialised and must
+  * not be handed to pci_unmap_single() */
+ if (size > 0)
+  pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
+   size, PCI_DMA_BIDIRECTIONAL);
+ cmd_free(h, c, 0);
+ return return_status;
+}
+/*
+ * register_new_disk: scan the controller for a logical volume that is
+ * not yet in our tables (optionally the specific LUN 'requested_lun'),
+ * bind it to a free drv[] slot, read its capacity and geometry, and
+ * register its partitions with the block layer.
+ *
+ * opened_vol is the volume through which the request arrived; its
+ * usage_count is preserved (see comment near the bottom).
+ *
+ * Returns the new volume's drv[] index on success; -EPERM, -EINVAL
+ * (controller full), -EBUSY or -ENOMEM on the corresponding failure;
+ * -1 when no new volume could be found or queried.
+ */
+static int register_new_disk(int ctlr, int opened_vol, __u64 requested_lun)
+{
+ struct gendisk *gdev = &(hba[ctlr]->gendisk);
+ ctlr_info_t *h = hba[ctlr];
+ int start, max_p, i;
+ int num_luns;
+ int logvol;
+ int new_lun_found = 0;
+ int new_lun_index = 0;
+ int free_index_found = 0;
+ int free_index = 0;
+ ReportLunData_struct *ld_buff;
+ ReadCapdata_struct *size_buff;
+ InquiryData_struct *inq_buff;
+ int return_code;
+ int listlength = 0;
+ __u32 lunid = 0;
+ unsigned int block_size;
+ unsigned int total_size;
+ unsigned long flags;
+ int req_lunid = (int) (requested_lun & (__u64) 0xffffffff);
+
+ if (!capable(CAP_SYS_RAWIO))
+  return -EPERM;
+ /* if we have no space in our disk array left to add anything */
+ spin_lock_irqsave(&io_request_lock, flags);
+ if (h->num_luns >= CISS_MAX_LUN) {
+  spin_unlock_irqrestore(&io_request_lock, flags);
+  return -EINVAL;
+ }
+ if (h->busy_configuring) {
+  spin_unlock_irqrestore(&io_request_lock, flags);
+  return -EBUSY;
+ }
+ h->busy_configuring = 1;
+ spin_unlock_irqrestore(&io_request_lock, flags);
+
+ ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
+ if (ld_buff == NULL) {
+  printk(KERN_ERR "cciss: out of memory\n");
+  h->busy_configuring = 0;
+  return -ENOMEM;
+ }
+ memset(ld_buff, 0, sizeof(ReportLunData_struct));
+ size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
+ if (size_buff == NULL) {
+  printk(KERN_ERR "cciss: out of memory\n");
+  kfree(ld_buff);
+  h->busy_configuring = 0;
+  return -ENOMEM;
+ }
+ inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
+ if (inq_buff == NULL) {
+  printk(KERN_ERR "cciss: out of memory\n");
+  kfree(ld_buff);
+  kfree(size_buff);
+  h->busy_configuring = 0;
+  return -ENOMEM;
+ }
+
+ return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
+   sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD);
+
+ if (return_code == IO_OK) {
+  listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
+ } else {
+  /* reading number of logical volumes failed */
+  printk(KERN_WARNING "cciss: report logical volume"
+   " command failed\n");
+  listlength = 0;
+  h->busy_configuring = 0;
+  /* don't leak the three command buffers on this path */
+  kfree(ld_buff);
+  kfree(size_buff);
+  kfree(inq_buff);
+  return -1;
+ }
+ num_luns = listlength / 8; /* 8 bytes pre entry */
+ if (num_luns > CISS_MAX_LUN)
+  num_luns = CISS_MAX_LUN;
+
+#ifdef CCISS_DEBUG
+ printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
+  ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
+  ld_buff->LUNListLength[3], num_luns);
+#endif
+ for(i=0; i< num_luns; i++) {
+  int j;
+  int lunID_found = 0;
+
+  /* LUN IDs arrive little-endian; assemble a host __u32 */
+  lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
+  lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
+  lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
+  lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
+
+  /* check to see if this is a new lun */
+  for(j=0; j <= h->highest_lun; j++) {
+#ifdef CCISS_DEBUG
+   printk("Checking %d %x against %x\n", j,h->drv[j].LunID,
+    lunid);
+#endif /* CCISS_DEBUG */
+   if (h->drv[j].LunID == lunid) {
+    lunID_found = 1;
+    break;
+   }
+  }
+  if (lunID_found == 1)
+   continue;
+  else { /* new lun found */
+#ifdef CCISS_DEBUG
+   printk("new lun found at %d\n", i);
+#endif /* CCISS_DEBUG */
+   if (req_lunid) { /* we are looking for a specific lun */
+    if (lunid != req_lunid) {
+#ifdef CCISS_DEBUG
+     printk("new lun %x is not %x\n",
+      lunid, req_lunid);
+#endif /* CCISS_DEBUG */
+     continue;
+    }
+   }
+   new_lun_index = i;
+   new_lun_found = 1;
+   break;
+  }
+ }
+ if (!new_lun_found) {
+  printk(KERN_DEBUG "cciss: New Logical Volume not found\n");
+  h->busy_configuring = 0;
+  /* don't leak the three command buffers on this path */
+  kfree(ld_buff);
+  kfree(size_buff);
+  kfree(inq_buff);
+  return -1;
+ }
+ /* Now find the free index */
+ for(i=0; i <CISS_MAX_LUN; i++) {
+#ifdef CCISS_DEBUG
+  printk("Checking Index %d\n", i);
+#endif /* CCISS_DEBUG */
+  /* LunID 0 marks an unused drv[] slot */
+  if (hba[ctlr]->drv[i].LunID == 0) {
+#ifdef CCISS_DEBUG
+   printk("free index found at %d\n", i);
+#endif /* CCISS_DEBUG */
+   free_index_found = 1;
+   free_index = i;
+   break;
+  }
+ }
+ if (!free_index_found) {
+  printk(KERN_WARNING "cciss: unable to find free slot for disk\n");
+  h->busy_configuring = 0;
+  /* don't leak the three command buffers on this path */
+  kfree(ld_buff);
+  kfree(size_buff);
+  kfree(inq_buff);
+  return -1;
+ }
+
+ logvol = free_index;
+ hba[ctlr]->drv[logvol].LunID = lunid;
+ /* there could be gaps in lun numbers, track hightest */
+ if (hba[ctlr]->highest_lun < logvol)
+  hba[ctlr]->highest_lun = logvol;
+
+ memset(size_buff, 0, sizeof(ReadCapdata_struct));
+ return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr,
+   size_buff, sizeof(ReadCapdata_struct), 1,
+   logvol, 0, TYPE_CMD);
+ if (return_code == IO_OK) {
+  /* big-endian reply; +1 because the command returns the
+   * highest block address, not the block count */
+  total_size = (0xff &
+   (unsigned int) size_buff->total_size[0]) << 24;
+  total_size |= (0xff &
+   (unsigned int) size_buff->total_size[1]) << 16;
+  total_size |= (0xff &
+   (unsigned int) size_buff->total_size[2]) << 8;
+  total_size |= (0xff &
+   (unsigned int) size_buff->total_size[3]);
+  total_size++; /* command returns highest block address */
+
+  block_size = (0xff &
+   (unsigned int) size_buff->block_size[0]) << 24;
+  block_size |= (0xff &
+   (unsigned int) size_buff->block_size[1]) << 16;
+  block_size |= (0xff &
+   (unsigned int) size_buff->block_size[2]) << 8;
+  block_size |= (0xff &
+   (unsigned int) size_buff->block_size[3]);
+ } else {
+  /* read capacity command failed */
+  printk(KERN_WARNING "cciss: read capacity failed\n");
+  total_size = 0;
+  block_size = BLOCK_SIZE;
+ }
+ printk(KERN_INFO " blocks= %d block_size= %d\n",
+  total_size, block_size);
+ /* Execute the command to read the disk geometry */
+ memset(inq_buff, 0, sizeof(InquiryData_struct));
+ return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
+  sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD);
+ if (return_code == IO_OK) {
+  if (inq_buff->data_byte[8] == 0xFF) {
+   printk(KERN_WARNING
+    "cciss: reading geometry failed, "
+    "volume does not support reading geometry\n");
+
+   hba[ctlr]->drv[logvol].block_size = block_size;
+   hba[ctlr]->drv[logvol].nr_blocks = total_size;
+   hba[ctlr]->drv[logvol].heads = 255;
+   hba[ctlr]->drv[logvol].sectors = 32; /* secs/trk */
+   hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
+   hba[ctlr]->drv[logvol].raid_level = RAID_UNKNOWN;
+  } else {
+   hba[ctlr]->drv[logvol].block_size = block_size;
+   hba[ctlr]->drv[logvol].nr_blocks = total_size;
+   hba[ctlr]->drv[logvol].heads = inq_buff->data_byte[6];
+   hba[ctlr]->drv[logvol].sectors = inq_buff->data_byte[7];
+   hba[ctlr]->drv[logvol].cylinders =
+    (inq_buff->data_byte[4] & 0xff) << 8;
+   hba[ctlr]->drv[logvol].cylinders +=
+    inq_buff->data_byte[5];
+   hba[ctlr]->drv[logvol].raid_level =
+    inq_buff->data_byte[8];
+  }
+ } else {
+  /* Get geometry failed */
+  printk(KERN_WARNING "cciss: reading geometry failed, "
+   "continuing with default geometry\n");
+
+  hba[ctlr]->drv[logvol].block_size = block_size;
+  hba[ctlr]->drv[logvol].nr_blocks = total_size;
+  hba[ctlr]->drv[logvol].heads = 255;
+  hba[ctlr]->drv[logvol].sectors = 32; /* Sectors per track */
+  hba[ctlr]->drv[logvol].cylinders = total_size / 255 / 32;
+ }
+ if (hba[ctlr]->drv[logvol].raid_level > 5)
+  hba[ctlr]->drv[logvol].raid_level = RAID_UNKNOWN;
+ printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d RAID %s\n\n",
+  hba[ctlr]->drv[logvol].heads,
+  hba[ctlr]->drv[logvol].sectors,
+  hba[ctlr]->drv[logvol].cylinders,
+  raid_label[hba[ctlr]->drv[logvol].raid_level]);
+
+ /* special case for c?d0, which may be opened even when
+    it does not "exist". In that case, don't mess with usage count.
+    Also, /dev/c1d1 could be used to re-add c0d0 so we can't just
+    check whether logvol == 0, must check logvol != opened_vol */
+ if (logvol != opened_vol)
+  hba[ctlr]->drv[logvol].usage_count = 0;
+
+ max_p = gdev->max_p;
+ start = logvol<< gdev->minor_shift;
+ hba[ctlr]->hd[start].nr_sects = total_size;
+ hba[ctlr]->sizes[start] = total_size;
+
+ for(i=max_p-1; i>=0; i--) {
+  int minor = start+i;
+  invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
+  gdev->part[minor].start_sect = 0;
+  gdev->part[minor].nr_sects = 0;
+
+  /* reset the blocksize so we can read the partition table */
+  blksize_size[hba[ctlr]->major][minor] = block_size;
+  hba[ctlr]->hardsizes[minor] = block_size;
+ }
+
+ ++hba[ctlr]->num_luns;
+ gdev->nr_real = hba[ctlr]->highest_lun + 1;
+ /* setup partitions per disk */
+ grok_partitions(gdev, logvol, MAX_PART,
+  hba[ctlr]->drv[logvol].nr_blocks);
+ kfree(ld_buff);
+ kfree(size_buff);
+ kfree(inq_buff);
+ h->busy_configuring = 0;
+ return logvol;
+}
+
+/*
+ * cciss_rescan_disk: re-read capacity and geometry for a volume that
+ * currently looks offline (size 0) and re-register its partitions.
+ *
+ * Returns 0 on success, 1 as a warning if the volume already appears
+ * online, -EPERM without CAP_SYS_RAWIO, -1 on allocation failure.
+ */
+static int cciss_rescan_disk(int ctlr, int logvol)
+{
+ struct gendisk *gdev = &(hba[ctlr]->gendisk);
+ int start, max_p, i;
+ ReadCapdata_struct *size_buff;
+ InquiryData_struct *inq_buff;
+ int return_code;
+ unsigned int block_size;
+ unsigned int total_size;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (hba[ctlr]->sizes[logvol << NWD_SHIFT] != 0) {
+ /* disk is possible on line, return just a warning */
+ return 1;
+ }
+ size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
+ if (size_buff == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ return -1;
+ }
+ inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
+ if (inq_buff == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ kfree(size_buff);
+ return -1;
+ }
+ memset(size_buff, 0, sizeof(ReadCapdata_struct));
+ return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, size_buff,
+ sizeof( ReadCapdata_struct), 1, logvol, 0,
+ TYPE_CMD);
+ if (return_code == IO_OK) {
+ /* reply is big-endian; +1 because the command returns the
+ * highest block address, not the block count */
+ total_size = (0xff &
+ (unsigned int)(size_buff->total_size[0])) << 24;
+ total_size |= (0xff &
+ (unsigned int)(size_buff->total_size[1])) << 16;
+ total_size |= (0xff &
+ (unsigned int)(size_buff->total_size[2])) << 8;
+ total_size |= (0xff & (unsigned int)
+ (size_buff->total_size[3]));
+ total_size++; /* command returns highest block address */
+
+ block_size = (0xff &
+ (unsigned int)(size_buff->block_size[0])) << 24;
+ block_size |= (0xff &
+ (unsigned int)(size_buff->block_size[1])) << 16;
+ block_size |= (0xff &
+ (unsigned int)(size_buff->block_size[2])) << 8;
+ block_size |= (0xff &
+ (unsigned int)(size_buff->block_size[3]));
+ } else { /* read capacity command failed */
+ printk(KERN_WARNING "cciss: read capacity failed\n");
+ /* NOTE(review): block_size 0 is later written into
+ * blksize_size[]/hardsizes[] below - unlike
+ * register_new_disk(), which falls back to BLOCK_SIZE;
+ * confirm this is intentional */
+ total_size = block_size = 0;
+ }
+ printk(KERN_INFO " blocks= %d block_size= %d\n",
+ total_size, block_size);
+ /* Execute the command to read the disk geometry */
+ memset(inq_buff, 0, sizeof(InquiryData_struct));
+ return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
+ sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD);
+ if (return_code == IO_OK) {
+ if (inq_buff->data_byte[8] == 0xFF) {
+ /* 0xFF in the RAID byte means the firmware cannot
+ * report geometry; make one up from the capacity */
+ printk(KERN_WARNING "cciss: reading geometry failed, "
+ "volume does not support reading geometry\n");
+
+ hba[ctlr]->drv[logvol].nr_blocks = total_size;
+ hba[ctlr]->drv[logvol].heads = 255;
+ hba[ctlr]->drv[logvol].sectors = 32; /* Sectors/track */
+ hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
+ } else {
+ hba[ctlr]->drv[logvol].nr_blocks = total_size;
+ hba[ctlr]->drv[logvol].heads = inq_buff->data_byte[6];
+ hba[ctlr]->drv[logvol].sectors = inq_buff->data_byte[7];
+ hba[ctlr]->drv[logvol].cylinders =
+ (inq_buff->data_byte[4] & 0xff) << 8;
+ hba[ctlr]->drv[logvol].cylinders +=
+ inq_buff->data_byte[5];
+ }
+ } else { /* Get geometry failed */
+ printk(KERN_WARNING "cciss: reading geometry failed, "
+ "continuing with default geometry\n");
+
+ hba[ctlr]->drv[logvol].nr_blocks = total_size;
+ hba[ctlr]->drv[logvol].heads = 255;
+ hba[ctlr]->drv[logvol].sectors = 32; /* Sectors / track */
+ hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
+ }
+
+ printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d \n\n",
+ hba[ctlr]->drv[logvol].heads,
+ hba[ctlr]->drv[logvol].sectors,
+ hba[ctlr]->drv[logvol].cylinders);
+ /* make the volume visible again and refresh its partitions */
+ max_p = gdev->max_p;
+ start = logvol<< gdev->minor_shift;
+ hba[ctlr]->hd[start].nr_sects = hba[ctlr]->sizes[start]= total_size;
+
+ for (i=max_p-1; i>=0; i--) {
+ int minor = start+i;
+ invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
+ gdev->part[minor].start_sect = 0;
+ gdev->part[minor].nr_sects = 0;
+
+ /* reset the blocksize so we can read the partition table */
+ blksize_size[hba[ctlr]->major][minor] = block_size;
+ hba[ctlr]->hardsizes[minor] = block_size;
+ }
+
+ /* setup partitions per disk */
+ grok_partitions(gdev, logvol, MAX_PART,
+ hba[ctlr]->drv[logvol].nr_blocks );
+
+ kfree(size_buff);
+ kfree(inq_buff);
+ return 0;
+}
+/*
+ * Poll the controller's completion FIFO for a finished command,
+ * sleeping one tick between probes, for up to 20 seconds.
+ * Used only at init time with interrupts disabled.
+ * Returns the completed command's tag, or 1 on timeout (1 is never a
+ * valid command address, so callers treat it as an error).
+ */
+static unsigned long pollcomplete(int ctlr)
+{
+ ctlr_info_t *info_p = hba[ctlr];
+ int ticks_left = 20 * HZ;
+
+ while (ticks_left-- > 0) {
+  unsigned long tag = info_p->access.command_completed(info_p);
+
+  if (tag != FIFO_EMPTY)
+   return tag;
+  set_current_state(TASK_UNINTERRUPTIBLE);
+  schedule_timeout(1);
+ }
+ /* Invalid address to tell caller we ran out of time */
+ return 1;
+}
+/*
+ * Send a command to the controller and busy-poll for its completion.
+ * Only used at init time: the controller interrupt is masked and the
+ * completion FIFO is polled directly via pollcomplete().
+ *
+ * use_unit_num: 0: address the controller,
+ *               1: address logical volume log_unit,
+ *               2: periph device address is scsi3addr
+ *
+ * Returns IO_OK on success, IO_ERROR on any failure.
+ */
+static int sendcmd(
+ __u8 cmd,
+ int ctlr,
+ void *buff,
+ size_t size,
+ unsigned int use_unit_num,
+ unsigned int log_unit,
+ __u8 page_code,
+ unsigned char *scsi3addr)
+{
+ CommandList_struct *c;
+ int i;
+ unsigned long complete;
+ ctlr_info_t *info_p= hba[ctlr];
+ u64bit buff_dma_handle;
+ int status = IO_OK;
+
+ c = cmd_alloc(info_p, 1);
+ if (c == NULL) {
+  printk(KERN_WARNING "cciss: unable to get memory");
+  return IO_ERROR;
+ }
+ /* Fill in Command Header */
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ if (buff != NULL) { /* buffer to fill */
+  c->Header.SGList = 1;
+  c->Header.SGTotal= 1;
+ } else { /* no buffers to fill */
+  c->Header.SGList = 0;
+  c->Header.SGTotal= 0;
+ }
+ /* tag is the phys addr of the cmd block; completions are matched
+  * against it below */
+ c->Header.Tag.lower = c->busaddr;
+ /* Fill in Request block */
+ switch (cmd) {
+ case CISS_INQUIRY:
+  /* If the logical unit number is 0 then, this is going
+     to controller so It's a physical command
+     mode = 0 target = 0.  So we have nothing to write.
+     otherwise, if use_unit_num == 1,
+     mode = 1(volume set addressing) target = LUNID
+     otherwise, if use_unit_num == 2,
+     mode = 0(periph dev addr) target = scsi3addr */
+  if (use_unit_num == 1) {
+   c->Header.LUN.LogDev.VolId=
+    hba[ctlr]->drv[log_unit].LunID;
+   c->Header.LUN.LogDev.Mode = 1;
+  }
+  else if (use_unit_num == 2) {
+   memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
+   c->Header.LUN.LogDev.Mode = 0; /* phys dev addr */
+  }
+  /* are we trying to read a vital product page */
+  if (page_code != 0) {
+   c->Request.CDB[1] = 0x01;
+   c->Request.CDB[2] = page_code;
+  }
+  c->Request.CDBLen = 6;
+  c->Request.Type.Type = TYPE_CMD; /* It is a command. */
+  c->Request.Type.Attribute = ATTR_SIMPLE;
+  c->Request.Type.Direction = XFER_READ; /* Read */
+  c->Request.Timeout = 0; /* Don't time out */
+  c->Request.CDB[0] = CISS_INQUIRY;
+  c->Request.CDB[4] = size & 0xFF;
+  break;
+ case CISS_REPORT_LOG:
+ case CISS_REPORT_PHYS:
+  /* Talking to controller so It's a physical command
+     mode = 00 target = 0.  So we have nothing to write. */
+  c->Request.CDBLen = 12;
+  c->Request.Type.Type = TYPE_CMD; /* It is a command. */
+  c->Request.Type.Attribute = ATTR_SIMPLE;
+  c->Request.Type.Direction = XFER_READ; /* Read */
+  c->Request.Timeout = 0; /* Don't time out */
+  c->Request.CDB[0] = cmd;
+  c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+  c->Request.CDB[7] = (size >> 16) & 0xFF;
+  c->Request.CDB[8] = (size >> 8) & 0xFF;
+  c->Request.CDB[9] = size & 0xFF;
+  break;
+ case CCISS_READ_CAPACITY:
+  c->Header.LUN.LogDev.VolId=
+   hba[ctlr]->drv[log_unit].LunID;
+  c->Header.LUN.LogDev.Mode = 1;
+  c->Request.CDBLen = 10;
+  c->Request.Type.Type = TYPE_CMD; /* It is a command. */
+  c->Request.Type.Attribute = ATTR_SIMPLE;
+  c->Request.Type.Direction = XFER_READ; /* Read */
+  c->Request.Timeout = 0; /* Don't time out */
+  c->Request.CDB[0] = CCISS_READ_CAPACITY;
+  break;
+ case CCISS_CACHE_FLUSH:
+  c->Request.CDBLen = 12;
+  c->Request.Type.Type = TYPE_CMD; /* It is a command. */
+  c->Request.Type.Attribute = ATTR_SIMPLE;
+  c->Request.Type.Direction = XFER_WRITE; /* No data */
+  c->Request.Timeout = 0; /* Don't time out */
+  c->Request.CDB[0] = BMIC_WRITE; /* BMIC Passthru */
+  c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+  break;
+ default:
+  printk(KERN_WARNING
+   "cciss: Unknown Command 0x%x sent attempted\n",
+   cmd);
+  cmd_free(info_p, c, 1);
+  return IO_ERROR;
+ }
+ /* Fill in the scatter gather information */
+ if (size > 0) {
+  buff_dma_handle.val = (__u64) pci_map_single( info_p->pdev,
+   buff, size, PCI_DMA_BIDIRECTIONAL);
+  c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
+  c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
+  c->SG[0].Len = size;
+  c->SG[0].Ext = 0; /* we are not chaining */
+ }
+resend_cmd1:
+ /* Disable interrupt: completion is detected by polling the FIFO */
+#ifdef CCISS_DEBUG
+ printk(KERN_DEBUG "cciss: turning intr off\n");
+#endif /* CCISS_DEBUG */
+ info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
+
+ /* Make sure there is room in the command FIFO */
+ /* Actually it should be completely empty at this time. */
+ for (i = 200000; i > 0; i--) {
+  /* if fifo isn't full go */
+  if (!(info_p->access.fifo_full(info_p))) {
+   break;
+  }
+  udelay(10);
+  printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
+   " waiting!\n", ctlr);
+ }
+ /* Send the cmd */
+ info_p->access.submit_command(info_p, c);
+ complete = pollcomplete(ctlr);
+
+#ifdef CCISS_DEBUG
+ printk(KERN_DEBUG "cciss: command completed\n");
+#endif /* CCISS_DEBUG */
+
+ if (complete != 1) {
+  /* completion tag with the error bit set means the error
+   * record for our command was filled in */
+  if ( (complete & CISS_ERROR_BIT)
+      && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
+   /* if data overrun or underun on Report command
+      ignore it */
+   if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
+        (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
+        (c->Request.CDB[0] == CISS_INQUIRY)) &&
+       ((c->err_info->CommandStatus ==
+         CMD_DATA_OVERRUN) ||
+        (c->err_info->CommandStatus ==
+         CMD_DATA_UNDERRUN)
+       )) {
+    complete = c->busaddr;
+   } else {
+    if (c->err_info->CommandStatus ==
+        CMD_UNSOLICITED_ABORT) {
+     printk(KERN_WARNING "cciss: "
+      "cmd %p aborted do "
+      "to an unsolicited abort \n", c);
+     if (c->retry_count < MAX_CMD_RETRIES) {
+      printk(KERN_WARNING
+       "retrying cmd\n");
+      c->retry_count++;
+      /* erase the old error */
+      /* information */
+      memset(c->err_info, 0,
+       sizeof(ErrorInfo_struct));
+      goto resend_cmd1;
+     } else {
+      printk(KERN_WARNING
+       "retried to many times\n");
+      status = IO_ERROR;
+      goto cleanup1;
+     }
+    }
+    printk(KERN_WARNING "cciss cciss%d: sendcmd"
+     " Error %x \n", ctlr,
+     c->err_info->CommandStatus);
+    printk(KERN_WARNING "cciss cciss%d: sendcmd"
+     " offensive info\n"
+     " size %x\n num %x value %x\n", ctlr,
+     c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
+     c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
+     c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
+    status = IO_ERROR;
+    goto cleanup1;
+   }
+  }
+  if (complete != c->busaddr) {
+   printk( KERN_WARNING "cciss cciss%d: SendCmd "
+    "Invalid command list address returned! (%lx)\n",
+    ctlr, complete);
+   status = IO_ERROR;
+   goto cleanup1;
+  }
+ } else {
+  printk( KERN_WARNING
+   "cciss cciss%d: SendCmd Timeout out, "
+   "No command list address returned!\n",
+   ctlr);
+  status = IO_ERROR;
+ }
+
+cleanup1:
+ /* unlock the data buffer from DMA - but only if something was
+  * mapped above; with size == 0, buff_dma_handle was never
+  * initialised and must not be handed to pci_unmap_single() */
+ if (size > 0)
+  pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
+   size, PCI_DMA_BIDIRECTIONAL);
+ cmd_free(info_p, c, 1);
+ return status;
+}
+/*
+ * Map (physical) PCI mem into (virtual) kernel space
+ */
+static ulong remap_pci_mem(ulong base, ulong size)
+{
+ ulong offset_in_page = base & ~PAGE_MASK;
+ ulong mapped = (ulong) ioremap(base & PAGE_MASK,
+     offset_in_page + size);
+
+ if (!mapped)
+  return 0UL;
+ return mapped + offset_in_page;
+}
+
+/*
+ * Enqueuing and dequeuing functions for cmdlists.
+ * addQ links command c in at the tail of the circular, doubly linked
+ * ring whose head pointer is *Qptr.
+ */
+static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
+{
+ CommandList_struct *head = *Qptr;
+
+ if (head == NULL) {
+  /* empty queue: c becomes a singleton ring */
+  c->next = c->prev = c;
+  *Qptr = c;
+ } else {
+  /* splice c in just before the head, i.e. at the tail */
+  c->prev = head->prev;
+  c->next = head;
+  head->prev->next = c;
+  head->prev = c;
+ }
+}
+
+/* Unlink command c from the ring headed by *Qptr and return it.
+ * Removing the last (or a NULL) element leaves the queue empty. */
+static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
+  CommandList_struct *c)
+{
+ if (c == NULL || c->next == c) {
+  /* nothing to unlink, or c was the only element */
+  *Qptr = NULL;
+  return c;
+ }
+ if (*Qptr == c)
+  *Qptr = c->next;
+ c->prev->next = c->next;
+ c->next->prev = c->prev;
+ return c;
+}
+
+/*
+ * Drain the software request queue into the controller, moving each
+ * command onto the completion queue to await its interrupt.  Stops
+ * early when the hardware FIFO fills up.
+ */
+static void start_io(ctlr_info_t *h)
+{
+ CommandList_struct *c;
+
+ for (c = h->reqQ; c != NULL; c = h->reqQ) {
+  /* can't do anything if fifo is full */
+  if (h->access.fifo_full(h)) {
+   printk(KERN_WARNING "cciss: fifo full \n");
+   return;
+  }
+
+  /* take the head entry off the request queue */
+  removeQ(&h->reqQ, c);
+  h->Qdepth--;
+
+  /* hand it to the controller */
+  h->access.submit_command(h, c);
+
+  /* and park it on the completed queue */
+  addQ(&h->cmpQ, c);
+ }
+}
+
+/* Complete every buffer_head on the chain with the given status,
+ * severing each from the chain before its end_io callback runs. */
+static inline void complete_buffers(struct buffer_head *bh, int status)
+{
+ while (bh != NULL) {
+  struct buffer_head *next = bh->b_reqnext;
+
+  bh->b_reqnext = NULL;
+  blk_finished_io(bh->b_size >> 9);
+  bh->b_end_io(bh, status);
+  bh = next;
+ }
+}
+/* This code assumes io_request_lock is already held */
+/* Clears the command's error record, requeues it, and kicks the
+ * controller so it gets retried. */
+static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
+{
+ /* forget the previous failure */
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+
+ /* back onto the software queue, tracking the depth high-water mark */
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ if (h->maxQsinceinit < h->Qdepth)
+  h->maxQsinceinit = h->Qdepth;
+
+ start_io(h);
+}
+/* Checks the completion status of a finished read/write command,
+ * retries it on unsolicited abort, and otherwise unmaps its DMA
+ * buffers and completes the block-layer request.  'status' passed to
+ * complete_buffers()/b_end_io is 1 for success, 0 for failure;
+ * 'timeout' non-zero forces a failure completion.
+ * Assumes io_request_lock is held (see resend_cciss_cmd above).
+ */
+static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
+ int timeout)
+{
+ int status = 1;
+ int retry_cmd = 0;
+ int i, ddir;
+ u64bit temp64;
+
+ if (timeout)
+ status = 0;
+
+ if (cmd->err_info->CommandStatus != 0) {
+ /* an error has occurred */
+ switch (cmd->err_info->CommandStatus) {
+ unsigned char sense_key;
+ case CMD_TARGET_STATUS:
+ status = 0;
+
+ /* SCSI status 0x02 = CHECK CONDITION: inspect sense */
+ if (cmd->err_info->ScsiStatus == 0x02) {
+ printk(KERN_WARNING "cciss: cmd %p "
+ "has CHECK CONDITION,"
+ " sense key = 0x%x\n", cmd,
+ cmd->err_info->SenseInfo[2]);
+ /* check the sense key */
+ sense_key = 0xf &
+ cmd->err_info->SenseInfo[2];
+ /* recovered error */
+ if ( sense_key == 0x1)
+ status = 1;
+ } else {
+ printk(KERN_WARNING "cciss: cmd %p "
+ "has SCSI Status 0x%x\n",
+ cmd, cmd->err_info->ScsiStatus);
+ }
+ break;
+ case CMD_DATA_UNDERRUN:
+ /* under/overrun are logged but still count as success */
+ printk(KERN_WARNING "cciss: cmd %p has"
+ " completed with data underrun "
+ "reported\n", cmd);
+ break;
+ case CMD_DATA_OVERRUN:
+ printk(KERN_WARNING "cciss: cmd %p has"
+ " completed with data overrun "
+ "reported\n", cmd);
+ break;
+ case CMD_INVALID:
+ printk(KERN_WARNING "cciss: cmd %p is "
+ "reported invalid\n", cmd);
+ status = 0;
+ break;
+ case CMD_PROTOCOL_ERR:
+ printk(KERN_WARNING "cciss: cmd %p has "
+ "protocol error \n", cmd);
+ status = 0;
+ break;
+ case CMD_HARDWARE_ERR:
+ printk(KERN_WARNING "cciss: cmd %p had "
+ " hardware error\n", cmd);
+ status = 0;
+ break;
+ case CMD_CONNECTION_LOST:
+ printk(KERN_WARNING "cciss: cmd %p had "
+ "connection lost\n", cmd);
+ status=0;
+ break;
+ case CMD_ABORTED:
+ printk(KERN_WARNING "cciss: cmd %p was "
+ "aborted\n", cmd);
+ status=0;
+ break;
+ case CMD_ABORT_FAILED:
+ printk(KERN_WARNING "cciss: cmd %p reports "
+ "abort failed\n", cmd);
+ status=0;
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ printk(KERN_WARNING "cciss: cmd %p aborted do "
+ "to an unsolicited abort \n",
+ cmd);
+ /* only this status is retried, up to MAX_CMD_RETRIES */
+ if (cmd->retry_count < MAX_CMD_RETRIES) {
+ retry_cmd=1;
+ printk(KERN_WARNING
+ "retrying cmd\n");
+ cmd->retry_count++;
+ } else {
+ printk(KERN_WARNING
+ "retried to many times\n");
+ }
+ status=0;
+ break;
+ case CMD_TIMEOUT:
+ printk(KERN_WARNING "cciss: cmd %p timedout\n",
+ cmd);
+ status=0;
+ break;
+ default:
+ printk(KERN_WARNING "cciss: cmd %p returned "
+ "unknown status %x\n", cmd,
+ cmd->err_info->CommandStatus);
+ status=0;
+ }
+ }
+ /* We need to return this command */
+ /* retry BEFORE unmapping: the scatter/gather list must stay
+ * DMA-mapped for the resent command */
+ if (retry_cmd) {
+ resend_cciss_cmd(h,cmd);
+ return;
+ }
+ /* command did not need to be retried */
+ /* unmap the DMA mapping for all the scatter gather elements */
+ if (cmd->Request.Type.Direction == XFER_READ)
+ ddir = PCI_DMA_FROMDEVICE;
+ else
+ ddir = PCI_DMA_TODEVICE;
+ for(i=0; i<cmd->Header.SGList; i++) {
+ temp64.val32.lower = cmd->SG[i].Addr.lower;
+ temp64.val32.upper = cmd->SG[i].Addr.upper;
+ /* NOTE(review): assumes the SG entries were mapped with
+ * pci_map_page() by the request path - confirm against
+ * do_cciss_request() */
+ pci_unmap_page(hba[cmd->ctlr]->pdev,
+ temp64.val, cmd->SG[i].Len, ddir);
+ }
+ complete_buffers(cmd->rq->bh, status);
+#ifdef CCISS_DEBUG
+ printk("Done with %p\n", cmd->rq);
+#endif /* CCISS_DEBUG */
+ end_that_request_last(cmd->rq);
+ cmd_free(h,cmd,1);
+}
+
+
+/* Grant the request one more scatter/gather segment if it is still
+ * under the controller limit; returns 1 on success, 0 when full. */
+static inline int cpq_new_segment(request_queue_t *q, struct request *rq,
+  int max_segments)
+{
+ if (rq->nr_segments >= MAXSGENTRIES)
+  return 0;
+ rq->nr_segments++;
+ return 1;
+}
+
+/* Can bh be appended to rq?  Merge into the tail segment when the
+ * buffers are contiguous, otherwise try to open a new segment. */
+static int cpq_back_merge_fn(request_queue_t *q, struct request *rq,
+  struct buffer_head *bh, int max_segments)
+{
+ return blk_seg_merge_ok(rq->bhtail, bh) ?
+  1 : cpq_new_segment(q, rq, max_segments);
+}
+
+/* Can bh be prepended to rq?  Merge into the head segment when the
+ * buffers are contiguous, otherwise try to open a new segment. */
+static int cpq_front_merge_fn(request_queue_t *q, struct request *rq,
+  struct buffer_head *bh, int max_segments)
+{
+ return blk_seg_merge_ok(bh, rq->bh) ?
+  1 : cpq_new_segment(q, rq, max_segments);
+}
+
+/* Can two adjacent requests be coalesced without exceeding the
+ * controller's scatter/gather limit? */
+static int cpq_merge_requests_fn(request_queue_t *q, struct request *rq,
+  struct request *nxt, int max_segments)
+{
+ int segs = rq->nr_segments + nxt->nr_segments;
+
+ /* contiguous tail/head buffers fuse into a single segment */
+ if (blk_seg_merge_ok(rq->bhtail, nxt->bh))
+  segs--;
+
+ if (segs > MAXSGENTRIES)
+  return 0;
+
+ rq->nr_segments = segs;
+ return 1;
+}
+
+/*
+ * Get a request and submit it to the controller.
+ * Currently we do one request at a time.  Ideally we would like to send
+ * everything to the controller on the first call, but there is a danger
+ * of holding the io_request_lock for too long.
+ *
+ * Called with io_request_lock held.  The lock is dropped while the
+ * scatter/gather list is built and DMA-mapped, then re-taken before
+ * the command is queued to the controller.
+ */
+static void do_cciss_request(request_queue_t *q)
+{
+	ctlr_info_t *h = q->queuedata;
+	CommandList_struct *c;
+	int log_unit, start_blk, seg;
+	unsigned long long lastdataend;
+	struct buffer_head *bh;
+	struct list_head *queue_head = &q->queue_head;
+	struct request *creq;
+	u64bit temp64;
+	struct scatterlist tmp_sg[MAXSGENTRIES];
+	int i, ddir;
+
+	if (q->plugged)
+		goto startio;
+
+next:
+	if (list_empty(queue_head))
+		goto startio;
+
+	creq = blkdev_entry_next_request(queue_head);
+	if (creq->nr_segments > MAXSGENTRIES)
+		BUG();
+
+	/* Request addressed to some other controller's major?  Fail it. */
+	if( h->ctlr != map_major_to_ctlr[MAJOR(creq->rq_dev)] ) {
+		printk(KERN_WARNING "doreq cmd for %d, %x at %p\n",
+			h->ctlr, creq->rq_dev, creq);
+		blkdev_dequeue_request(creq);
+		complete_buffers(creq->bh, 0);
+		end_that_request_last(creq);
+		goto startio;
+	}
+
+	/* make sure controller is alive. */
+	if (!CTLR_IS_ALIVE(h)) {
+		/* Fail the request and do NOT restart I/O on a dead board. */
+		printk(KERN_WARNING "cciss%d: I/O quit ", h->ctlr);
+		blkdev_dequeue_request(creq);
+		complete_buffers(creq->bh, 0);
+		end_that_request_last(creq);
+		return;
+	}
+
+	/* No free command block means we simply try again later. */
+	if (( c = cmd_alloc(h, 1)) == NULL)
+		goto startio;
+
+	blkdev_dequeue_request(creq);
+
+	spin_unlock_irq(&io_request_lock);
+
+	c->cmd_type = CMD_RWREQ;
+	c->rq = creq;
+	bh = creq->bh;
+
+	/* fill in the request */
+	log_unit = MINOR(creq->rq_dev) >> NWD_SHIFT;
+	c->Header.ReplyQueue = 0;	/* unused in simple mode */
+	c->Header.Tag.lower = c->busaddr;	/* use the physical address */
+						/* the cmd block for tag */
+	c->Header.LUN.LogDev.VolId= hba[h->ctlr]->drv[log_unit].LunID;
+	c->Header.LUN.LogDev.Mode = 1;
+	c->Request.CDBLen = 10;	/* 12 byte commands not in FW yet. */
+	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
+	c->Request.Type.Attribute = ATTR_SIMPLE;
+	c->Request.Type.Direction =
+		(creq->cmd == READ) ? XFER_READ: XFER_WRITE;
+	c->Request.Timeout = 0;	/* Don't time out */
+	c->Request.CDB[0] = (creq->cmd == READ) ? CCISS_READ : CCISS_WRITE;
+	/* Translate the per-partition sector to an absolute block. */
+	start_blk = hba[h->ctlr]->hd[MINOR(creq->rq_dev)].start_sect + creq->sector;
+#ifdef CCISS_DEBUG
+	if (bh == NULL)
+		panic("cciss: bh== NULL?");
+	printk(KERN_DEBUG "cciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
+		(int) creq->nr_sectors);
+#endif /* CCISS_DEBUG */
+	/*
+	 * Walk the buffer_head chain, merging physically contiguous
+	 * buffers into one scatter/gather entry each.
+	 */
+	seg = 0;
+	lastdataend = ~0ULL;	/* sentinel: never matches a real address */
+	while(bh) {
+		if ((page_to_phys(bh->b_page) + bh_offset(bh)) == lastdataend)
+		{	/* tack it on to the last segment */
+			tmp_sg[seg-1].length +=bh->b_size;
+			lastdataend += bh->b_size;
+		} else {
+			if (seg == MAXSGENTRIES)
+				BUG();
+			tmp_sg[seg].page = bh->b_page;
+			tmp_sg[seg].length = bh->b_size;
+			tmp_sg[seg].offset = bh_offset(bh);
+			lastdataend = bh_phys(bh) + bh->b_size;
+			seg++;
+		}
+		bh = bh->b_reqnext;
+	}
+
+	/* get the DMA records for the setup */
+	if (c->Request.Type.Direction == XFER_READ)
+		ddir = PCI_DMA_FROMDEVICE;
+	else
+		ddir = PCI_DMA_TODEVICE;
+	for (i=0; i<seg; i++) {
+		c->SG[i].Len = tmp_sg[i].length;
+		temp64.val = pci_map_page(h->pdev, tmp_sg[i].page,
+			tmp_sg[i].offset, tmp_sg[i].length, ddir);
+		c->SG[i].Addr.lower = temp64.val32.lower;
+		c->SG[i].Addr.upper = temp64.val32.upper;
+		c->SG[i].Ext = 0;	/* we are not chaining */
+	}
+	/* track how many SG entries we are using */
+	if (seg > h->maxSG)
+		h->maxSG = seg;
+
+#ifdef CCISS_DEBUG
+	/* BUGFIX: this used the undeclared identifier `sect'. */
+	printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
+		(int) creq->nr_sectors, seg);
+#endif /* CCISS_DEBUG */
+
+	/* Build the 10-byte READ/WRITE CDB: big-endian LBA and count. */
+	c->Header.SGList = c->Header.SGTotal = seg;
+	c->Request.CDB[1]= 0;
+	c->Request.CDB[2]= (start_blk >> 24) & 0xff;	/* MSB */
+	c->Request.CDB[3]= (start_blk >> 16) & 0xff;
+	c->Request.CDB[4]= (start_blk >> 8) & 0xff;
+	c->Request.CDB[5]= start_blk & 0xff;
+	c->Request.CDB[6]= 0;	/* (sect >> 24) & 0xff; MSB */
+	c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
+	c->Request.CDB[8]= creq->nr_sectors & 0xff;
+	c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
+
+	spin_lock_irq(&io_request_lock);
+
+	addQ(&(h->reqQ),c);
+	h->Qdepth++;
+	if (h->Qdepth > h->maxQsinceinit)
+		h->maxQsinceinit = h->Qdepth;
+
+	goto next;
+
+startio:
+	start_io(h);
+}
+
+/*
+ * Interrupt handler.  Drains the controller's completion FIFO, matching
+ * each completed tag against the commands on h->cmpQ and completing
+ * them, then kicks do_cciss_request() to keep the hardware busy.
+ */
+static void do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ ctlr_info_t *h = dev_id;
+ CommandList_struct *c;
+ unsigned long flags;
+ __u32 a, a1;
+
+
+ /* Is this interrupt for us? */
+ if ((h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0))
+ return;
+
+ /*
+ * If there are completed commands in the completion queue,
+ * we had better do something about it.
+ */
+ spin_lock_irqsave(&io_request_lock, flags);
+ while( h->access.intr_pending(h)) {
+ while((a = h->access.command_completed(h)) != FIFO_EMPTY) {
+ a1 = a;
+ /* Mask the low two tag bits off before matching; the unmasked
+ * value a1 is still handed to complete_scsi_command(). */
+ a &= ~3;
+ if ((c = h->cmpQ) == NULL) {
+ printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1);
+ continue;
+ }
+ /* Walk the (circular) completion queue for a matching busaddr. */
+ while(c->busaddr != a) {
+ c = c->next;
+ if (c == h->cmpQ)
+ break;
+ }
+ /*
+ * If we've found the command, take it off the
+ * completion Q and free it
+ */
+ if (c->busaddr == a) {
+ removeQ(&h->cmpQ, c);
+ if (c->cmd_type == CMD_RWREQ) {
+ complete_command(h, c, 0);
+ } else if (c->cmd_type == CMD_IOCTL_PEND) {
+#if 0
+ complete(c->waiting);
+#else
+ /* XXX SMH: use a flag to signal */
+ /* Clearing *waiting tells the synchronous submitter, which
+ * is presumably polling this flag, that the command is done. */
+ if(*(int *)(c->waiting) != 0)
+ *(int *)(c->waiting) = 0;
+#endif
+ }
+# ifdef CONFIG_CISS_SCSI_TAPE
+ else if (c->cmd_type == CMD_SCSI) {
+ complete_scsi_command(c, 0, a1);
+ }
+# endif
+ continue;
+ }
+ }
+ }
+ /*
+ * See if we can queue up some more IO
+ */
+ do_cciss_request(BLK_DEFAULT_QUEUE(h->major));
+ spin_unlock_irqrestore(&io_request_lock, flags);
+}
+/*
+ * We cannot read the structure directly; for portability we must use
+ * the io functions.
+ * This is for debug only.
+ */
+#ifdef CCISS_DEBUG
+/* Dump the controller's memory-mapped configuration table via readb/readl. */
+static void print_cfg_table( CfgTable_struct *tb)
+{
+	int i;
+	char temp_name[17];
+
+	printk("Controller Configuration information\n");
+	printk("------------------------------------\n");
+	for(i=0;i<4;i++)
+		temp_name[i] = readb(&(tb->Signature[i]));
+	temp_name[4]='\0';
+	printk("   Signature = %s\n", temp_name);
+	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
+	printk("   Transport methods supported = 0x%x\n",
+		readl(&(tb-> TransportSupport)));
+	printk("   Transport methods active = 0x%x\n",
+		readl(&(tb->TransportActive)));
+	printk("   Requested transport Method = 0x%x\n",
+		readl(&(tb->HostWrite.TransportRequest)));
+	printk("   Coalese Interrupt Delay = 0x%x\n",
+		readl(&(tb->HostWrite.CoalIntDelay)));
+	printk("   Coalese Interrupt Count = 0x%x\n",
+		readl(&(tb->HostWrite.CoalIntCount)));
+	/* BUGFIX: was the malformed format "0x%d". */
+	printk("   Max outstanding commands = %d\n",
+		readl(&(tb->CmdsOutMax)));
+	printk("   Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
+	for(i=0;i<16;i++)
+		temp_name[i] = readb(&(tb->ServerName[i]));
+	temp_name[16] = '\0';
+	printk("   Server Name = %s\n", temp_name);
+	printk("   Heartbeat Counter = 0x%x\n\n\n",
+		readl(&(tb->HeartBeat)));
+}
+#endif /* CCISS_DEBUG */
+
+/*
+ * Give back the controller's reserved I/O port range, if one was
+ * actually claimed, and clear the bookkeeping so a second call is a
+ * harmless no-op.
+ */
+static void release_io_mem(ctlr_info_t *c)
+{
+	/* if IO mem was not protected do nothing */
+	if (c->io_mem_addr != 0) {
+		release_region(c->io_mem_addr, c->io_mem_length);
+		c->io_mem_addr = 0;
+		c->io_mem_length = 0;
+	}
+}
+/*
+ * Translate a PCI config-space BAR offset (e.g. PCI_BASE_ADDRESS_2)
+ * into a resource index usable with pci_resource_start().  The mapping
+ * is not 1:1 because a 64-bit memory BAR occupies two config dwords.
+ * Returns the resource index, or -1 if the offset does not correspond
+ * to a valid BAR.
+ */
+static int find_PCI_BAR_index(struct pci_dev *pdev,
+		unsigned long pci_bar_addr)
+{
+	int i, offset, mem_type, bar_type;
+	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
+		return 0;
+	offset = 0;
+	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
+		bar_type = pci_resource_flags(pdev, i) &
+			PCI_BASE_ADDRESS_SPACE;
+		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
+			offset += 4;	/* I/O BARs are always one dword */
+		else {
+			mem_type = pci_resource_flags(pdev, i) &
+				PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+			switch (mem_type) {
+			case PCI_BASE_ADDRESS_MEM_TYPE_32:
+			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+				offset += 4;	/* 32 bit */
+				break;
+			case PCI_BASE_ADDRESS_MEM_TYPE_64:
+				offset += 8;	/* 64-bit BARs span two dwords */
+				break;
+			default:	/* reserved in PCI 2.2 */
+				printk(KERN_WARNING "Base address is invalid\n");
+				return -1;
+				/* (unreachable break removed) */
+			}
+		}
+		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
+			return i+1;
+	}
+	return -1;
+}
+
+/*
+ * One-time PCI setup for a controller: enable the device, set the DMA
+ * mask, reserve its I/O range, map the register window and the
+ * configuration table, identify the board, and switch it into simple
+ * (non-performant) transport mode.  Returns 0 on success, -1 on any
+ * failure (with the I/O range released).
+ */
+static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
+{
+	ushort subsystem_vendor_id, subsystem_device_id, command;
+	unchar irq = pdev->irq;
+	__u32 board_id;
+	__u64 cfg_offset;
+	__u32 cfg_base_addr;
+	/* BUGFIX: was __u64; find_PCI_BAR_index() returns int and the
+	 * value is compared against -1 and printed with %x below. */
+	int cfg_base_addr_index;
+	int i;
+
+	/* check to see if controller has been disabled */
+	/* BEFORE we try to enable it */
+	(void) pci_read_config_word(pdev, PCI_COMMAND,&command);
+	if (!(command & 0x02)) {
+		printk(KERN_WARNING "cciss: controller appears to be disabled\n");
+		return -1;
+	}
+	if (pci_enable_device(pdev)) {
+		printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
+		return -1;
+	}
+	if (pci_set_dma_mask(pdev, CCISS_DMA_MASK ) != 0) {
+		printk(KERN_ERR "cciss: Unable to set DMA mask\n");
+		return -1;
+	}
+
+	subsystem_vendor_id = pdev->subsystem_vendor;
+	subsystem_device_id = pdev->subsystem_device;
+	/* BUGFIX: cast before the shift so the ushort is not promoted to
+	 * (signed) int and shifted into the sign bit. */
+	board_id = (((__u32) subsystem_device_id << 16) & 0xffff0000) |
+		subsystem_vendor_id;
+
+
+	/* search for our IO range so we can protect it */
+	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
+		/* is this an IO range */
+		if (pci_resource_flags(pdev, i) & 0x01) {
+			c->io_mem_addr = pci_resource_start(pdev, i);
+			c->io_mem_length = pci_resource_end(pdev, i) -
+				pci_resource_start(pdev, i) + 1;
+#ifdef CCISS_DEBUG
+			printk("IO value found base_addr[%d] %lx %lx\n", i,
+				c->io_mem_addr, c->io_mem_length);
+#endif /* CCISS_DEBUG */
+			/* register the IO range */
+			if (!request_region( c->io_mem_addr,
+					c->io_mem_length, "cciss")) {
+				printk(KERN_WARNING
+					"cciss I/O memory range already in "
+					"use addr=%lx length=%ld\n",
+					c->io_mem_addr, c->io_mem_length);
+				c->io_mem_addr= 0;
+				c->io_mem_length = 0;
+			}
+			break;
+		}
+	}
+
+#ifdef CCISS_DEBUG
+	printk("command = %x\n", command);
+	printk("irq = %x\n", irq);
+	printk("board_id = %x\n", board_id);
+#endif /* CCISS_DEBUG */
+
+	c->intr = irq;
+
+	/*
+	 * Memory base addr is first addr , the second points to the config
+	 * table
+	 */
+
+	c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
+#ifdef CCISS_DEBUG
+	printk("address 0 = %lx\n", (unsigned long) c->paddr);
+#endif /* CCISS_DEBUG */
+	c->vaddr = remap_pci_mem(c->paddr, 200);
+
+	/* get the address index number */
+	cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
+	/* I am not prepared to deal with a 64 bit address value */
+	cfg_base_addr &= (__u32) 0x0000ffff;
+#ifdef CCISS_DEBUG
+	printk("cfg base address = %x\n", cfg_base_addr);
+#endif /* CCISS_DEBUG */
+	cfg_base_addr_index =
+		find_PCI_BAR_index(pdev, cfg_base_addr);
+#ifdef CCISS_DEBUG
+	printk("cfg base address index = %x\n", cfg_base_addr_index);
+#endif /* CCISS_DEBUG */
+	if (cfg_base_addr_index == -1) {
+		printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
+		/* BUGFIX: was release_io_mem(hba[i]); at this point i is a
+		 * stale loop index, not this controller's slot. */
+		release_io_mem(c);
+		return -1;
+	}
+
+	cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
+#ifdef CCISS_DEBUG
+	printk("cfg offset = %x\n", (unsigned int) cfg_offset);
+#endif /* CCISS_DEBUG */
+	c->cfgtable = (CfgTable_struct *)
+		remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index)
+				+ cfg_offset, sizeof(CfgTable_struct));
+	c->board_id = board_id;
+
+#ifdef CCISS_DEBUG
+	print_cfg_table(c->cfgtable);
+#endif /* CCISS_DEBUG */
+
+	for(i=0; i<NR_PRODUCTS; i++) {
+		if (board_id == products[i].board_id) {
+			c->product_name = products[i].product_name;
+			c->access = *(products[i].access);
+			break;
+		}
+	}
+	if (i == NR_PRODUCTS) {
+		printk(KERN_WARNING "cciss: Sorry, I don't know how"
+			" to access the Smart Array controller %08lx\n",
+				(unsigned long)board_id);
+		release_io_mem(c);	/* don't leak the reserved range */
+		return -1;
+	}
+	if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
+	     (readb(&c->cfgtable->Signature[1]) != 'I') ||
+	     (readb(&c->cfgtable->Signature[2]) != 'S') ||
+	     (readb(&c->cfgtable->Signature[3]) != 'S') ) {
+		printk("Does not appear to be a valid CISS config table\n");
+		release_io_mem(c);	/* don't leak the reserved range */
+		return -1;
+	}
+#ifdef CCISS_DEBUG
+	printk("Trying to put board into Simple mode\n");
+#endif /* CCISS_DEBUG */
+	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
+	/* Update the field, and then ring the doorbell */
+	writel( CFGTBL_Trans_Simple,
+		&(c->cfgtable->HostWrite.TransportRequest));
+	writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
+
+	/* Here, we wait, possibly for a long time, (4 secs or more).
+	 * In some unlikely cases, (e.g. A failed 144 GB drive in a
+	 * RAID 5 set was hot replaced just as we're coming in here) it
+	 * can take that long.  Normally (almost always) we will wait
+	 * less than 1 sec. */
+	for(i=0;i<MAX_CONFIG_WAIT;i++) {
+		if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+			break;
+		/* delay and try again */
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
+#endif /* CCISS_DEBUG */
+#ifdef CCISS_DEBUG
+	print_cfg_table(c->cfgtable);
+#endif /* CCISS_DEBUG */
+
+	if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+		printk(KERN_WARNING "cciss: unable to get board into"
+			" simple mode\n");
+		release_io_mem(c);	/* don't leak the reserved range */
+		return -1;
+	}
+	return 0;
+
+}
+
+/*
+ * Gets information about the local volumes attached to the controller.
+ */
+/*
+ * Gets information about the local volumes attached to the controller:
+ * firmware version, the list of logical volumes, and per-volume
+ * capacity, block size and (real or fabricated) CHS geometry, all
+ * stored into hba[cntl_num]->drv[].  Failures are logged and degraded
+ * defaults are used; the function itself returns nothing.
+ */
+static void cciss_getgeometry(int cntl_num)
+{
+ ReportLunData_struct *ld_buff;
+ ReadCapdata_struct *size_buff;
+ InquiryData_struct *inq_buff;
+ int return_code;
+ int i;
+ int listlength = 0;
+ __u32 lunid = 0;
+ int block_size;
+ int total_size;
+
+ /* Three scratch buffers, each freed on every exit path below. */
+ ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
+ if (ld_buff == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ return;
+ }
+ memset(ld_buff, 0, sizeof(ReportLunData_struct));
+ size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
+ if (size_buff == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ kfree(ld_buff);
+ return;
+ }
+ inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
+ if (inq_buff == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ kfree(ld_buff);
+ kfree(size_buff);
+ return;
+ }
+ /* Get the firmware version */
+ return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
+ sizeof(InquiryData_struct), 0, 0 ,0, NULL);
+ if (return_code == IO_OK) {
+ /* firmware revision lives at bytes 32..35 of the inquiry data */
+ hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
+ hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
+ hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
+ hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
+ } else { /* send command failed */
+ printk(KERN_WARNING "cciss: unable to determine firmware"
+ " version of controller\n");
+ }
+ /* Get the number of logical volumes */
+ return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
+ sizeof(ReportLunData_struct), 0, 0, 0, NULL);
+
+ if (return_code == IO_OK) {
+#ifdef CCISS_DEBUG
+ printk("LUN Data\n--------------------------\n");
+#endif /* CCISS_DEBUG */
+
+ /* length field is big-endian on the wire */
+ listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
+ } else { /* reading number of logical volumes failed */
+ printk(KERN_WARNING "cciss: report logical volume"
+ " command failed\n");
+ listlength = 0;
+ }
+ hba[cntl_num]->num_luns = listlength / 8; /* 8 bytes pre entry */
+ if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
+ printk(KERN_ERR "cciss: only %d number of logical volumes supported\n",
+ CISS_MAX_LUN);
+ hba[cntl_num]->num_luns = CISS_MAX_LUN;
+ }
+#ifdef CCISS_DEBUG
+ printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
+ ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
+ ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
+#endif /* CCISS_DEBUG */
+
+ hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
+ /* Interrogate each logical volume in turn. */
+ for(i=0; i< hba[cntl_num]->num_luns; i++) {
+ /* reassemble the 4-byte LUN id (stored little-endian here) */
+ lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
+ lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
+ lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
+ lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
+ hba[cntl_num]->drv[i].LunID = lunid;
+
+#ifdef CCISS_DEBUG
+ printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
+ ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2],
+ ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID);
+#endif /* CCISS_DEBUG */
+
+ memset(size_buff, 0, sizeof(ReadCapdata_struct));
+ return_code = sendcmd(CCISS_READ_CAPACITY, cntl_num, size_buff,
+ sizeof( ReadCapdata_struct), 1, i, 0, NULL);
+ if (return_code == IO_OK) {
+ /* both fields come back big-endian */
+ total_size = (0xff &
+ (unsigned int)(size_buff->total_size[0])) << 24;
+ total_size |= (0xff &
+ (unsigned int)(size_buff->total_size[1])) << 16;
+ total_size |= (0xff &
+ (unsigned int)(size_buff->total_size[2])) << 8;
+ total_size |= (0xff & (unsigned int)
+ (size_buff->total_size[3]));
+ total_size++; /* command returns highest */
+ /* block address */
+
+ block_size = (0xff &
+ (unsigned int)(size_buff->block_size[0])) << 24;
+ block_size |= (0xff &
+ (unsigned int)(size_buff->block_size[1])) << 16;
+ block_size |= (0xff &
+ (unsigned int)(size_buff->block_size[2])) << 8;
+ block_size |= (0xff &
+ (unsigned int)(size_buff->block_size[3]));
+ } else { /* read capacity command failed */
+ printk(KERN_WARNING "cciss: read capacity failed\n");
+ total_size = block_size = 0;
+ }
+ printk(KERN_INFO "      blocks= %d block_size= %d\n",
+ total_size, block_size);
+
+ /* Execute the command to read the disk geometry */
+ memset(inq_buff, 0, sizeof(InquiryData_struct));
+ /* 0xC1 is a vendor-specific inquiry page; presumably the
+ * geometry page -- byte 8 == 0xFF flags "not supported". */
+ return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
+ sizeof(InquiryData_struct), 1, i ,0xC1, NULL );
+ if (return_code == IO_OK) {
+ if (inq_buff->data_byte[8] == 0xFF) {
+ /* volume has no geometry page: fabricate 255 heads x 32 sectors */
+ printk(KERN_WARNING "cciss: reading geometry failed, volume does not support reading geometry\n");
+
+ hba[cntl_num]->drv[i].block_size = block_size;
+ hba[cntl_num]->drv[i].nr_blocks = total_size;
+ hba[cntl_num]->drv[i].heads = 255;
+ hba[cntl_num]->drv[i].sectors = 32; /* Sectors */
+ /* per track */
+ hba[cntl_num]->drv[i].cylinders = total_size
+ / 255 / 32;
+ } else {
+
+ hba[cntl_num]->drv[i].block_size = block_size;
+ hba[cntl_num]->drv[i].nr_blocks = total_size;
+ hba[cntl_num]->drv[i].heads =
+ inq_buff->data_byte[6];
+ hba[cntl_num]->drv[i].sectors =
+ inq_buff->data_byte[7];
+ hba[cntl_num]->drv[i].cylinders =
+ (inq_buff->data_byte[4] & 0xff) << 8;
+ hba[cntl_num]->drv[i].cylinders +=
+ inq_buff->data_byte[5];
+ hba[cntl_num]->drv[i].raid_level =
+ inq_buff->data_byte[8];
+ }
+ }
+ else { /* Get geometry failed */
+ /* same fabricated geometry as the unsupported case above */
+ printk(KERN_WARNING "cciss: reading geometry failed, continuing with default geometry\n");
+
+ hba[cntl_num]->drv[i].block_size = block_size;
+ hba[cntl_num]->drv[i].nr_blocks = total_size;
+ hba[cntl_num]->drv[i].heads = 255;
+ hba[cntl_num]->drv[i].sectors = 32; /* Sectors */
+ /* per track */
+ hba[cntl_num]->drv[i].cylinders = total_size / 255 / 32;
+ }
+ if (hba[cntl_num]->drv[i].raid_level > 5)
+ hba[cntl_num]->drv[i].raid_level = RAID_UNKNOWN;
+ printk(KERN_INFO "      heads= %d, sectors= %d, cylinders= %d RAID %s\n\n",
+ hba[cntl_num]->drv[i].heads,
+ hba[cntl_num]->drv[i].sectors,
+ hba[cntl_num]->drv[i].cylinders,
+ raid_label[hba[cntl_num]->drv[i].raid_level]);
+ }
+ kfree(ld_buff);
+ kfree(size_buff);
+ kfree(inq_buff);
+}
+
+/* Function to find the first free pointer into our hba[] array */
+/* Returns -1 if no free entries are left. */
+/* Function to find the first free pointer into our hba[] array and
+ * allocate a ctlr_info_t for it.  Returns the slot index, or -1 when
+ * no slot is free or the allocation fails.  The returned structure is
+ * NOT zeroed here; the caller is expected to initialize it. */
+static int alloc_cciss_hba(void)
+{
+	int slot;
+
+	for (slot = 0; slot < MAX_CTLR; slot++) {
+		if (hba[slot] != NULL)
+			continue;
+		hba[slot] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
+		if (hba[slot] == NULL) {
+			printk(KERN_ERR "cciss: out of memory.\n");
+			return -1;
+		}
+		return slot;
+	}
+	printk(KERN_WARNING
+		"cciss: This driver supports a maximum of %d controllers.\n"
+		"You can change this value in cciss.c and recompile.\n",
+		MAX_CTLR);
+	return -1;
+}
+
+/* Release the controller structure in slot ctlr and mark the slot free. */
+static void free_hba(int ctlr)
+{
+	kfree(hba[ctlr]);
+	hba[ctlr] = NULL;
+}
+#ifdef CONFIG_CISS_MONITOR_THREAD
+/*
+ * Declare the controller dead: disable the PCI device, then fail every
+ * pending and in-flight command with CMD_HARDWARE_ERR so upper layers
+ * (and any multipath failover) see the errors promptly.  Also used as
+ * the watchdog timer callback, hence the unsigned long argument.
+ */
+static void fail_all_cmds(unsigned long ctlr)
+{
+ /* If we get here, the board is apparently dead. */
+ ctlr_info_t *h = hba[ctlr];
+ CommandList_struct *c;
+ unsigned long flags;
+
+ printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
+ h->alive = 0; /* the controller apparently died... */
+
+ spin_lock_irqsave(&io_request_lock, flags);
+
+ pci_disable_device(h->pdev); /* Make sure it is really dead. */
+
+ /* move everything off the request queue onto the completed queue */
+ while( (c = h->reqQ) != NULL ) {
+ removeQ(&(h->reqQ), c);
+ h->Qdepth--;
+ addQ (&(h->cmpQ), c);
+ }
+
+ /* Now, fail everything on the completed queue with a HW error */
+ while( (c = h->cmpQ) != NULL ) {
+ removeQ(&h->cmpQ, c);
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ if (c->cmd_type == CMD_RWREQ) {
+ complete_command(h, c, 0);
+ } else if (c->cmd_type == CMD_IOCTL_PEND)
+ complete(c->waiting);
+# ifdef CONFIG_CISS_SCSI_TAPE
+ else if (c->cmd_type == CMD_SCSI)
+ complete_scsi_command(c, 0, 0);
+# endif
+ }
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return;
+}
+/*
+ * Kernel-thread body that watches a controller for liveness.  Each
+ * period it checks the firmware heartbeat counter and sends a no-op
+ * message guarded by a watchdog timer; if either indicates the board
+ * is hung, fail_all_cmds() is invoked.  Returns 0 when the thread
+ * exits (period set to 0, signal received, or board declared dead).
+ */
+static int cciss_monitor(void *ctlr)
+{
+ /* If the board fails, we ought to detect that. So we periodically
+ send down a No-Op message and expect it to complete quickly. If it
+ doesn't, then we assume the board is dead, and fail all commands.
+ This is useful mostly in a multipath configuration, so that failover
+ will happen. */
+
+ int rc;
+ ctlr_info_t *h = (ctlr_info_t *) ctlr;
+ unsigned long flags;
+ u32 current_timer;
+
+ /* detach from the spawning process's context */
+ daemonize();
+ exit_files(current);
+ reparent_to_init();
+
+ printk("cciss%d: Monitor thread starting.\n", h->ctlr);
+
+ /* only listen to signals if the HA was loaded as a module. */
+#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
+ siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
+ sprintf(current->comm, "ccissmon%d", h->ctlr);
+ h->monitor_thread = current;
+
+ init_timer(&h->watchdog);
+ h->watchdog.function = fail_all_cmds;
+ h->watchdog.data = (unsigned long) h->ctlr;
+ while (1) {
+ /* check heartbeat timer */
+ /* NOTE(review): heartbeat_timer is file-scope state defined
+ elsewhere; an unchanged counter since the last pass means the
+ firmware has stopped advancing it -- treated as board death. */
+ current_timer = readl(&h->cfgtable->HeartBeat);
+ current_timer &= 0x0fffffff;
+ if (heartbeat_timer == current_timer) {
+ fail_all_cmds(h->ctlr);
+ break;
+ }
+ else
+ heartbeat_timer = current_timer;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(h->monitor_period * HZ);
+ /* arm the watchdog: if the no-op doesn't complete within
+ monitor_deadline seconds, fail_all_cmds() fires. */
+ h->watchdog.expires = jiffies + HZ * h->monitor_deadline;
+ add_timer(&h->watchdog);
+ /* send down a trivial command (no op message) to ctlr */
+ rc = sendcmd_withirq(3, h->ctlr, NULL, 0, 0, 0, 0, TYPE_MSG);
+ del_timer(&h->watchdog);
+ if (!CTLR_IS_ALIVE(h))
+ break;
+ if (signal_pending(current)) {
+ printk(KERN_WARNING "%s received signal.\n",
+ current->comm);
+ break;
+ }
+ if (h->monitor_period == 0) /* zero period means exit thread */
+ break;
+ }
+ printk(KERN_INFO "%s exiting.\n", current->comm);
+ /* clear thread bookkeeping under the lock so writers in
+ start_monitor_thread() see a consistent state */
+ spin_lock_irqsave(&io_request_lock, flags);
+ h->monitor_started = 0;
+ h->monitor_thread = NULL;
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return 0;
+}
+/*
+ * Parse a "monitor <secs>" or "deadline <secs>" command string and
+ * start the monitor thread, or adjust (or stop, period==0) an already
+ * running one.  On success *rc is set to count and 0 is returned;
+ * returns -1 if cmd matches neither keyword.  All monitor state is
+ * read and written under io_request_lock.
+ */
+static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd,
+ unsigned long count, int (*cciss_monitor)(void *), int *rc)
+{
+ unsigned long flags;
+ unsigned int new_period, old_period, new_deadline, old_deadline;
+
+ if (strncmp("monitor", cmd, 7) == 0) {
+ /* period given; keep the current deadline */
+ new_period = simple_strtol(cmd + 8, NULL, 10);
+ spin_lock_irqsave(&io_request_lock, flags);
+ new_deadline = h->monitor_deadline;
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ } else if (strncmp("deadline", cmd, 8) == 0) {
+ /* deadline given; keep the current period */
+ new_deadline = simple_strtol(cmd + 9, NULL, 10);
+ spin_lock_irqsave(&io_request_lock, flags);
+ new_period = h->monitor_period;
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ } else
+ return -1;
+ /* clamp the period into [CCISS_MIN_PERIOD, CCISS_MAX_PERIOD];
+ zero is left alone because it means "stop the thread" */
+ if (new_period != 0 && new_period < CCISS_MIN_PERIOD)
+ new_period = CCISS_MIN_PERIOD;
+ if (new_period > CCISS_MAX_PERIOD)
+ new_period = CCISS_MAX_PERIOD;
+ /* the deadline must expire before the next probe is sent */
+ if (new_deadline >= new_period) {
+ new_deadline = new_period - 5;
+ printk(KERN_INFO "setting deadline to %d\n", new_deadline);
+ }
+ spin_lock_irqsave(&io_request_lock, flags);
+ if (h->monitor_started != 0) {
+ /* thread already running: just update its parameters */
+ old_period = h->monitor_period;
+ old_deadline = h->monitor_deadline;
+ h->monitor_period = new_period;
+ h->monitor_deadline = new_deadline;
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ if (new_period == 0) {
+ printk(KERN_INFO "cciss%d: stopping monitor thread\n",
+ h->ctlr);
+ *rc = count;
+ return 0;
+ }
+ if (new_period != old_period)
+ printk(KERN_INFO "cciss%d: adjusting monitor thread "
+ "period from %d to %d seconds\n",
+ h->ctlr, old_period, new_period);
+ if (new_deadline != old_deadline)
+ printk(KERN_INFO "cciss%d: adjusting monitor thread "
+ "deadline from %d to %d seconds\n",
+ h->ctlr, old_deadline, new_deadline);
+ *rc = count;
+ return 0;
+ }
+ /* first invocation: record state, then spawn the thread */
+ h->monitor_started = 1;
+ h->monitor_period = new_period;
+ h->monitor_deadline = new_deadline;
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ kernel_thread(cciss_monitor, h, 0);
+ *rc = count;
+ return 0;
+}
+
+/* Ask the controller's monitor thread, if one is running, to exit. */
+static void kill_monitor_thread(ctlr_info_t *h)
+{
+	if (h->monitor_thread != NULL)
+		send_sig(SIGKILL, h->monitor_thread, 1);
+}
+#else
+#define kill_monitor_thread(h)
+#endif
+/*
+ * This is it. Find all the controllers and register them. I really hate
+ * stealing all these major device numbers.
+ * returns the number of block devices registered.
+ */
+/*
+ * PCI probe routine: bring up one controller.  Allocates an hba[] slot,
+ * performs PCI/register setup, hooks the interrupt, allocates the
+ * command and error-info pools, scans for logical drives and tape
+ * devices, and registers the block queue and gendisk.  Returns 1 on
+ * success, -1 on any failure (with partial setup undone).
+ */
+static int __init cciss_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ request_queue_t *q;
+ int i;
+ int j;
+ int rc;
+
+ printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
+ " bus %d dev %d func %d\n",
+ pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ i = alloc_cciss_hba();
+ if (i < 0 )
+ return -1;
+ /* hba[i] comes back from kmalloc un-zeroed; clear it before use */
+ memset(hba[i], 0, sizeof(ctlr_info_t));
+ if (cciss_pci_init(hba[i], pdev) != 0) {
+ free_hba(i);
+ return -1;
+ }
+ sprintf(hba[i]->devname, "cciss%d", i);
+ hba[i]->ctlr = i;
+
+ /* register with the major number, or get a dynamic major number */
+ /* by passing 0 as argument */
+
+ if (i < MAX_CTLR_ORIG)
+ hba[i]->major = MAJOR_NR + i;
+
+ hba[i]->pdev = pdev;
+ ASSERT_CTLR_ALIVE(hba[i]);
+
+#if 0
+ /* NOTE(review): blkdev registration is compiled out in this port;
+ the error paths below still call unregister_blkdev, which is
+ presumably harmless without a prior registration -- verify. */
+ rc = (register_blkdev(hba[i]->major, hba[i]->devname, &cciss_fops));
+ if (rc < 0) {
+ printk(KERN_ERR "cciss: Unable to get major number "
+ "%d for %s\n", hba[i]->major, hba[i]->devname);
+ release_io_mem(hba[i]);
+ free_hba(i);
+ return -1;
+ } else
+ {
+ if (i < MAX_CTLR_ORIG) {
+ hba[i]->major = MAJOR_NR + i;
+ map_major_to_ctlr[MAJOR_NR + i] = i;
+ } else {
+ hba[i]->major = rc;
+ map_major_to_ctlr[rc] = i;
+ }
+ }
+
+XXXX Need to register this...
+
+#endif
+
+ /* make sure the board interrupts are off */
+ hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
+ if (request_irq(hba[i]->intr, do_cciss_intr,
+ SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
+ hba[i]->devname, hba[i])) {
+
+ printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
+ hba[i]->intr, hba[i]->devname);
+ unregister_blkdev( hba[i]->major, hba[i]->devname);
+ map_major_to_ctlr[hba[i]->major] = 0;
+ release_io_mem(hba[i]);
+ free_hba(i);
+ return -1;
+ }
+ /* allocate the command pool bitmap plus DMA-coherent command and
+ error-info arrays (NR_CMDS entries each) */
+ hba[i]->cmd_pool_bits = (__u32*)kmalloc(
+ ((NR_CMDS+31)/32)*sizeof(__u32), GFP_KERNEL);
+ hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
+ hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
+ &(hba[i]->cmd_pool_dhandle));
+ hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
+ hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
+ &(hba[i]->errinfo_pool_dhandle));
+ if ((hba[i]->cmd_pool_bits == NULL)
+ || (hba[i]->cmd_pool == NULL)
+ || (hba[i]->errinfo_pool == NULL)) {
+
+ /* partial allocation: free whatever succeeded, undo setup */
+ if (hba[i]->cmd_pool_bits)
+ kfree(hba[i]->cmd_pool_bits);
+ if (hba[i]->cmd_pool)
+ pci_free_consistent(hba[i]->pdev,
+ NR_CMDS * sizeof(CommandList_struct),
+ hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+ if (hba[i]->errinfo_pool)
+ pci_free_consistent(hba[i]->pdev,
+ NR_CMDS * sizeof( ErrorInfo_struct),
+ hba[i]->errinfo_pool,
+ hba[i]->errinfo_pool_dhandle);
+ free_irq(hba[i]->intr, hba[i]);
+ unregister_blkdev(hba[i]->major, hba[i]->devname);
+ map_major_to_ctlr[hba[i]->major] = 0;
+ release_io_mem(hba[i]);
+ free_hba(i);
+ printk( KERN_ERR "cciss: out of memory");
+ return -1;
+ }
+
+ /* Initialize the pdev driver private data.
+ have it point to hba[i]. */
+ pci_set_drvdata(pdev, hba[i]);
+ /* command and error info recs zeroed out before
+ they are used */
+ memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+31)/32)*sizeof(__u32));
+
+#ifdef CCISS_DEBUG
+ printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
+#endif /* CCISS_DEBUG */
+
+ cciss_getgeometry(i);
+
+ cciss_find_non_disk_devices(i); /* find our tape drives, if any */
+
+ /* Turn the interrupts on so we can service requests */
+ hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
+
+ cciss_procinit(i);
+
+ /* wire our request function and merge helpers into the queue */
+ q = BLK_DEFAULT_QUEUE(hba[i]->major);
+ q->queuedata = hba[i];
+ blk_init_queue(q, do_cciss_request);
+#if 0
+ // XXX SMH; no bounce support for us yet
+ blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
+#endif
+ blk_queue_headactive(q, 0);
+
+ /* fill in the other Kernel structs */
+ blksize_size[hba[i]->major] = hba[i]->blocksizes;
+ hardsect_size[hba[i]->major] = hba[i]->hardsizes;
+ read_ahead[hba[i]->major] = READ_AHEAD;
+
+ /* Set the pointers to queue functions */
+ q->back_merge_fn = cpq_back_merge_fn;
+ q->front_merge_fn = cpq_front_merge_fn;
+ q->merge_requests_fn = cpq_merge_requests_fn;
+
+
+ /* Fill in the gendisk data */
+ hba[i]->gendisk.major = hba[i]->major;
+ hba[i]->gendisk.major_name = "cciss";
+ hba[i]->gendisk.minor_shift = NWD_SHIFT;
+ hba[i]->gendisk.max_p = MAX_PART;
+ hba[i]->gendisk.part = hba[i]->hd;
+ hba[i]->gendisk.sizes = hba[i]->sizes;
+ hba[i]->gendisk.nr_real = hba[i]->highest_lun+1;
+ hba[i]->gendisk.fops = &cciss_fops;
+
+ /* Get on the disk list */
+ add_gendisk(&(hba[i]->gendisk));
+
+ cciss_geninit(i);
+ for(j=0; j<NWD; j++)
+ register_disk(&(hba[i]->gendisk),
+ MKDEV(hba[i]->major, j <<4),
+ MAX_PART, &cciss_fops,
+ hba[i]->drv[j].nr_blocks);
+
+ cciss_register_scsi(i, 1); /* hook ourself into SCSI subsystem */
+
+ return 1;
+}
+
+/*
+ * PCI remove routine: tear down one controller.  Stops the monitor
+ * thread, flushes the controller's write cache (if the board is still
+ * alive), releases the IRQ, mappings, queues, pools and I/O range, and
+ * finally frees the hba[] slot.
+ */
+static void __devexit cciss_remove_one (struct pci_dev *pdev)
+{
+ ctlr_info_t *tmp_ptr;
+ int i;
+ char flush_buf[4];
+ int return_code;
+
+ if (pci_get_drvdata(pdev) == NULL) {
+ printk( KERN_ERR "cciss: Unable to remove device \n");
+ return;
+ }
+ tmp_ptr = pci_get_drvdata(pdev);
+ i = tmp_ptr->ctlr;
+ if (hba[i] == NULL) {
+ printk(KERN_ERR "cciss: device appears to "
+ "already be removed \n");
+ return;
+ }
+ kill_monitor_thread(hba[i]);
+ /* no sense in trying to flush a dead board's cache. */
+ if (CTLR_IS_ALIVE(hba[i])) {
+ /* Turn board interrupts off and flush the cache */
+ /* write all data in the battery backed cache to disks */
+ memset(flush_buf, 0, 4);
+ return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf,
+ 4, 0, 0, 0, NULL);
+ if (return_code != IO_OK)
+ printk(KERN_WARNING
+ "cciss%d: Error flushing cache\n", i);
+ }
+ free_irq(hba[i]->intr, hba[i]);
+ pci_set_drvdata(pdev, NULL);
+ iounmap((void*)hba[i]->vaddr);
+ cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
+ unregister_blkdev(hba[i]->major, hba[i]->devname);
+ map_major_to_ctlr[hba[i]->major] = 0;
+ //remove_proc_entry(hba[i]->devname, proc_cciss);
+
+
+ /* remove it from the disk list */
+ del_gendisk(&(hba[i]->gendisk));
+
+ /* free the DMA-coherent command/error pools and the bitmap */
+ pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
+ hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+ pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
+ hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
+ kfree(hba[i]->cmd_pool_bits);
+ release_io_mem(hba[i]);
+ free_hba(i);
+}
+
+/* PCI driver glue: binds our probe/remove callbacks to the device ids
+ * in cciss_pci_device_id.  (GNU old-style labeled initializers, as
+ * used throughout this kernel tree.) */
+static struct pci_driver cciss_pci_driver = {
+ name: "cciss",
+ probe: cciss_init_one,
+ remove: __devexit_p(cciss_remove_one),
+ id_table: cciss_pci_device_id, /* id_table */
+};
+
+/*
+* This is it. Register the PCI driver information for the cards we control
+* the OS will call our registered routines when it finds one of our cards.
+*/
+/*
+ * This is it.  Register the PCI driver information for the cards we
+ * control; the OS will call our registered routines when it finds one
+ * of our cards.  Returns the result of pci_module_init().
+ */
+int __init cciss_init(void)
+{
+	printk(KERN_INFO DRIVER_NAME "\n");
+	/* Register for out PCI devices */
+	return pci_module_init(&cciss_pci_driver);
+}
+
+EXPORT_NO_SYMBOLS;
+/* Module entry point: defer to the common initialization path. */
+static int __init init_cciss_module(void)
+{
+	return cciss_init();
+}
+
+/*
+ * Module exit: unregister from the PCI core, then sweep hba[] and tear
+ * down any controller whose removal the core did not already trigger.
+ */
+static void __exit cleanup_cciss_module(void)
+{
+	int ctlr;
+
+	pci_unregister_driver(&cciss_pci_driver);
+	/* double check that all controller entrys have been removed */
+	for (ctlr = 0; ctlr < MAX_CTLR; ctlr++) {
+		if (hba[ctlr] == NULL)
+			continue;
+		printk(KERN_WARNING "cciss: had to remove"
+			" controller %d\n", ctlr);
+		cciss_remove_one(hba[ctlr]->pdev);
+	}
+	//remove_proc_entry("cciss", proc_root_driver);
+}
+
+module_init(init_cciss_module);
+module_exit(cleanup_cciss_module);
--- /dev/null
+/*
+ * Disk Array driver for HP SA 5xxx and 6xxx Controllers, SCSI Tape module
+ * Copyright 2001, 2002 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to Cciss-discuss@lists.sourceforge.net
+ *
+ * Author: Stephen M. Cameron
+ */
+#ifdef CONFIG_CISS_SCSI_TAPE
+
+/* Here we have code to present the driver as a scsi driver
+ as it is simultaneously presented as a block driver. The
+ reason for doing this is to allow access to SCSI tape drives
+ through the array controller. Note in particular, neither
+ physical nor logical disks are presented through the scsi layer. */
+
+#include "../scsi/scsi.h"
+#include "../scsi/hosts.h"
+#include <asm/atomic.h>
+#include <linux/timer.h>
+
+#include "cciss_scsi.h"
+
+/* some prototypes... */
+static int sendcmd(
+ __u8 cmd,
+ int ctlr,
+ void *buff,
+ size_t size,
+ unsigned int use_unit_num, /* 0: address the controller,
+ 1: address logical volume log_unit,
+ 2: address is in scsi3addr */
+ unsigned int log_unit,
+ __u8 page_code,
+ unsigned char *scsi3addr );
+
+
+int __init cciss_scsi_detect(Scsi_Host_Template *tpnt);
+int cciss_scsi_release(struct Scsi_Host *sh);
+const char *cciss_scsi_info(struct Scsi_Host *sa);
+
+int cciss_scsi_proc_info(
+ char *buffer, /* data buffer */
+ char **start, /* where data in buffer starts */
+ off_t offset, /* offset from start of imaginary file */
+ int length, /* length of data in buffer */
+ int hostnum, /* which host adapter (always zero for me) */
+ int func); /* 0 == read, 1 == write */
+
+int cciss_scsi_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *));
+#if 0
+int cciss_scsi_abort(Scsi_Cmnd *cmd);
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+int cciss_scsi_reset(Scsi_Cmnd *cmd, unsigned int reset_flags);
+#else
+int cciss_scsi_reset(Scsi_Cmnd *cmd);
+#endif
+#endif
+
+static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR];
+
+/* We need one Scsi_Host_Template *per controller* instead of
+ the usual one Scsi_Host_Template per controller *type*. This
+ is so PCI hot plug could have a remote possibility of still
+ working even with the SCSI system. It's so
+ scsi_unregister_module will differentiate the controllers.
+ When register_scsi_module is called, each host template is
+ customized (name change) in cciss_register_scsi()
+ (that's called from cciss.c:cciss_init_one()) */
+
+static
+Scsi_Host_Template driver_template[MAX_CTLR];
+
#pragma pack(1)
/* One slot in the per-adapter command free stack: the controller command,
 * the error-info area that must immediately follow it (scsi_cmd_alloc
 * points ErrDesc at busaddr + sizeof(CommandList_struct)), and the slot's
 * precomputed bus address.  Packed so the hardware-visible layout has no
 * compiler padding. */
struct cciss_scsi_cmd_stack_elem_t {
	CommandList_struct cmd;
	ErrorInfo_struct Err;
	__u32 busaddr; // 32 bits always, must fit through cmd register.
};

#pragma pack()
+
/* Enough commands for every (device, queue slot) pair,
 * plus two for init time usage. */
#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \
	CCISS_MAX_SCSI_DEVS_PER_HBA + 2)

#pragma pack(1)
/* LIFO free list of preallocated commands.  'pool' is one contiguous
 * DMA-coherent allocation; elem[] holds pointers to free slots within it,
 * and 'top' indexes the topmost free entry (-1 == stack empty). */
struct cciss_scsi_cmd_stack_t {
	struct cciss_scsi_cmd_stack_elem_t *pool;
	struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE];
	dma_addr_t cmd_pool_handle;	/* bus address of 'pool' */
	int top;
};
#pragma pack()
+
/* Per-adapter SCSI-side state, hung off hba[ctlr]->scsi_ctlr. */
struct cciss_scsi_adapter_data_t {
	struct Scsi_Host *scsi_host;	/* NULL until cciss_scsi_detect runs */
	struct cciss_scsi_cmd_stack_t cmd_stack;
	int registered;		/* NOTE(review): presumably set once exposed to
				   the scsi layer -- set outside this view, confirm */
	spinlock_t lock; // to protect ccissscsi[ctlr];
};
#if 1
/* Protect ccissscsi[ctlr] with the per-adapter lock.  Wrapped in
 * do { } while (0) with no trailing semicolon so each macro behaves as a
 * single statement (the old definitions embedded a ';', which made call
 * sites like "CPQ_TAPE_LOCK(c, f);" expand to two statements and would
 * break an un-braced if/else). */
#define CPQ_TAPE_LOCK(ctlr, flags) do { \
		spin_lock_irqsave( \
			&(((struct cciss_scsi_adapter_data_t *) \
			hba[ctlr]->scsi_ctlr)->lock), flags); \
	} while (0)
#define CPQ_TAPE_UNLOCK(ctlr, flags) do { \
		spin_unlock_irqrestore( \
			&(((struct cciss_scsi_adapter_data_t *) \
			hba[ctlr]->scsi_ctlr)->lock), flags); \
	} while (0)
#else
#define CPQ_TAPE_LOCK(x,y)
#define CPQ_TAPE_UNLOCK(x,y)
#endif
+
+static CommandList_struct *
+scsi_cmd_alloc(ctlr_info_t *h)
+{
+ /* assume only one process in here at a time, locking done by caller. */
+
+ /* take the top memory chunk off the stack and return it, if any. */
+ struct cciss_scsi_cmd_stack_elem_t *c;
+ struct cciss_scsi_adapter_data_t *sa;
+ struct cciss_scsi_cmd_stack_t *stk;
+ u64bit temp64;
+
+ sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+ stk = &sa->cmd_stack;
+
+ if (stk->top < 0)
+ return NULL;
+ c = stk->elem[stk->top];
+ memset(&c->cmd, 0, sizeof(c->cmd));
+ memset(&c->Err, 0, sizeof(c->Err));
+ /* set physical addr of cmd and addr of scsi parameters */
+ c->cmd.busaddr = c->busaddr;
+
+ temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
+ stk->top--;
+ c->cmd.ErrDesc.Addr.lower = temp64.val32.lower;
+ c->cmd.ErrDesc.Addr.upper = temp64.val32.upper;
+ c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct);
+
+ c->cmd.ctlr = h->ctlr;
+ c->cmd.err_info = &c->Err;
+
+ return (CommandList_struct *) c;
+}
+
+static void
+scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
+{
+ /* assume only one process in here at a time, locking done by caller. */
+ /* drop the free memory chunk on top of the stack. */
+
+ struct cciss_scsi_adapter_data_t *sa;
+ struct cciss_scsi_cmd_stack_t *stk;
+
+ sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+ stk = &sa->cmd_stack;
+ if (stk->top >= CMD_STACK_SIZE) {
+ printk("cciss: scsi_cmd_free called too many times.\n");
+ BUG();
+ }
+ stk->top++;
+ stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
+}
+
+static int
+scsi_cmd_stack_setup(int ctlr)
+{
+ int i;
+ struct cciss_scsi_adapter_data_t *sa;
+ struct cciss_scsi_cmd_stack_t *stk;
+ size_t size;
+
+ sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+ stk = &sa->cmd_stack;
+ size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
+
+ stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
+ pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
+
+ if (stk->pool == NULL) {
+ printk("stk->pool is null\n");
+ return -1;
+ }
+
+ for (i=0; i<CMD_STACK_SIZE; i++) {
+ stk->elem[i] = &stk->pool[i];
+ stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
+ (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
+ }
+ stk->top = CMD_STACK_SIZE-1;
+ return 0;
+}
+
+static void
+scsi_cmd_stack_free(int ctlr)
+{
+ struct cciss_scsi_adapter_data_t *sa;
+ struct cciss_scsi_cmd_stack_t *stk;
+ size_t size;
+
+ sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+ stk = &sa->cmd_stack;
+ if (stk->top != CMD_STACK_SIZE-1) {
+ printk( "cciss: %d scsi commands are still outstanding.\n",
+ CMD_STACK_SIZE - stk->top);
+ // BUG();
+ printk("WE HAVE A BUG HERE!!! stk=0x%08x\n",
+ (unsigned int) stk);
+ }
+ size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
+
+ pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
+ stk->pool = NULL;
+}
+
/* scsi_device_types (from scsi.h) has MAX_SCSI_DEVICE_CODE entries, so the
 * highest valid index is MAX_SCSI_DEVICE_CODE-1.  The old test used '>',
 * letting n == MAX_SCSI_DEVICE_CODE read one entry past the end of the
 * table; also parenthesize the argument and the whole expansion. */
#define DEVICETYPE(n) (((n) < 0 || (n) >= MAX_SCSI_DEVICE_CODE) ? \
	"Unknown" : scsi_device_types[n])
+
+#if 0
+static int xmargin=8;
+static int amargin=60;
+
+static void
+print_bytes (unsigned char *c, int len, int hex, int ascii)
+{
+
+ int i;
+ unsigned char *x;
+
+ if (hex)
+ {
+ x = c;
+ for (i=0;i<len;i++)
+ {
+ if ((i % xmargin) == 0 && i>0) printk("\n");
+ if ((i % xmargin) == 0) printk("0x%04x:", i);
+ printk(" %02x", *x);
+ x++;
+ }
+ printk("\n");
+ }
+ if (ascii)
+ {
+ x = c;
+ for (i=0;i<len;i++)
+ {
+ if ((i % amargin) == 0 && i>0) printk("\n");
+ if ((i % amargin) == 0) printk("0x%04x:", i);
+ if (*x > 26 && *x < 128) printk("%c", *x);
+ else printk(".");
+ x++;
+ }
+ printk("\n");
+ }
+}
+
+static void
+print_cmd(CommandList_struct *cp)
+{
+ printk("queue:%d\n", cp->Header.ReplyQueue);
+ printk("sglist:%d\n", cp->Header.SGList);
+ printk("sgtot:%d\n", cp->Header.SGTotal);
+ printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
+ cp->Header.Tag.lower);
+ printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ cp->Header.LUN.LunAddrBytes[0],
+ cp->Header.LUN.LunAddrBytes[1],
+ cp->Header.LUN.LunAddrBytes[2],
+ cp->Header.LUN.LunAddrBytes[3],
+ cp->Header.LUN.LunAddrBytes[4],
+ cp->Header.LUN.LunAddrBytes[5],
+ cp->Header.LUN.LunAddrBytes[6],
+ cp->Header.LUN.LunAddrBytes[7]);
+ printk("CDBLen:%d\n", cp->Request.CDBLen);
+ printk("Type:%d\n",cp->Request.Type.Type);
+ printk("Attr:%d\n",cp->Request.Type.Attribute);
+ printk(" Dir:%d\n",cp->Request.Type.Direction);
+ printk("Timeout:%d\n",cp->Request.Timeout);
+ printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ cp->Request.CDB[0], cp->Request.CDB[1],
+ cp->Request.CDB[2], cp->Request.CDB[3],
+ cp->Request.CDB[4], cp->Request.CDB[5],
+ cp->Request.CDB[6], cp->Request.CDB[7],
+ cp->Request.CDB[8], cp->Request.CDB[9],
+ cp->Request.CDB[10], cp->Request.CDB[11],
+ cp->Request.CDB[12], cp->Request.CDB[13],
+ cp->Request.CDB[14], cp->Request.CDB[15]),
+ printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
+ cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
+ cp->ErrDesc.Len);
+ printk("sgs..........Errorinfo:\n");
+ printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
+ printk("senselen:%d\n", cp->err_info->SenseLen);
+ printk("cmd status:%d\n", cp->err_info->CommandStatus);
+ printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
+ printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
+ printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
+ printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
+
+}
+
+#endif
+
+static int
+find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
+{
+ /* finds an unused bus, target, lun for a new device */
+ /* assumes hba[ctlr]->scsi_ctlr->lock is held */
+ int i, found=0;
+ unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
+
+ memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
+
+# if SELF_SCSI_ID >= 0
+ target_taken[SELF_SCSI_ID] = 1;
+# endif
+ for (i=0;i<ccissscsi[ctlr].ndevices;i++)
+ target_taken[ccissscsi[ctlr].dev[i].target] = 1;
+
+ for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) {
+ if (!target_taken[i]) {
+ *bus = 0; *target=i; *lun = 0; found=1;
+ break;
+ }
+ }
+ return (!found);
+}
+
+static int
+cciss_scsi_add_entry(int ctlr, int hostno,
+ unsigned char *scsi3addr, int devtype)
+{
+ /* assumes hba[ctlr]->scsi_ctlr->lock is held */
+ int n = ccissscsi[ctlr].ndevices;
+ struct cciss_scsi_dev_t *sd;
+
+ if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
+ printk("cciss%d: Too many devices, "
+ "some will be inaccessible.\n", ctlr);
+ return -1;
+ }
+ sd = &ccissscsi[ctlr].dev[n];
+ if (find_bus_target_lun(ctlr, &sd->bus, &sd->target, &sd->lun) != 0)
+ return -1;
+ memcpy(&sd->scsi3addr[0], scsi3addr, 8);
+ sd->devtype = devtype;
+ ccissscsi[ctlr].ndevices++;
+
+ /* initially, (before registering with scsi layer) we don't
+ know our hostno and we don't want to print anything first
+ time anyway (the scsi layer's inquiries will show that info) */
+ if (hostno != -1)
+ printk("cciss%d: %s device c%db%dt%dl%d added.\n",
+ ctlr, DEVICETYPE(sd->devtype), hostno,
+ sd->bus, sd->target, sd->lun);
+ return 0;
+}
+
+static void
+cciss_scsi_remove_entry(int ctlr, int hostno, int entry)
+{
+ /* assumes hba[ctlr]->scsi_ctlr->lock is held */
+ int i;
+ struct cciss_scsi_dev_t sd;
+
+ if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
+ sd = ccissscsi[ctlr].dev[entry];
+ for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
+ ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
+ ccissscsi[ctlr].ndevices--;
+ printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
+ ctlr, DEVICETYPE(sd.devtype), hostno,
+ sd.bus, sd.target, sd.lun);
+}
+
+
/* True iff two 8-byte SCSI-3 addresses are identical (all bytes equal). */
#define SCSI3ADDR_EQ(a,b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
+
static int
adjust_cciss_scsi_table(int ctlr, int hostno,
	struct cciss_scsi_dev_t sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, but
	   bus target and lun are not filled in. This function
	   takes what's in sd to be the current truth and adjusts
	   ccissscsi[] to be in line with what's in sd.
	   Two passes under the tape lock:
	     1. drop/flag entries of ccissscsi[] absent from (or of a
	        different type than) sd[];
	     2. add entries of sd[] missing from ccissscsi[].
	   'found' encoding in both passes: 0 = not present,
	   1 = present with different devtype, 2 = exact match.
	   Always returns 0. */

	int i,j, found, changes=0;
	struct cciss_scsi_dev_t *csd;
	unsigned long flags;

	CPQ_TAPE_LOCK(ctlr, flags);

	/* find any devices in ccissscsi[] that are not in
	   sd[] and remove them from ccissscsi[] */

	i = 0;
	while(i<ccissscsi[ctlr].ndevices) {
		csd = &ccissscsi[ctlr].dev[i];
		found=0;
		for (j=0;j<nsds;j++) {
			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
				csd->scsi3addr)) {
				if (sd[j].devtype == csd->devtype)
					found=2;
				else
					found=1;
				break;
			}
		}

		if (found == 0) { /* device no longer present. */
			changes++;
			/* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
				ctlr, DEVICETYPE(csd->devtype), hostno,
					csd->bus, csd->target, csd->lun); */
			cciss_scsi_remove_entry(ctlr, hostno, i);
			/* note, i not incremented: removal shifted the
			   next entry into slot i */
		}
		else if (found == 1) { /* device is different kind */
			changes++;
			printk("cciss%d: device c%db%dt%dl%d type changed "
				"(device type now %s).\n",
				ctlr, hostno, csd->bus, csd->target, csd->lun,
					DEVICETYPE(csd->devtype));
			csd->devtype = sd[j].devtype;
			i++;	/* so just move along. */
		} else 		/* device is same as it ever was, */
			i++;	/* so just move along. */
	}

	/* Now, make sure every device listed in sd[] is also
 	   listed in ccissscsi[], adding them if they aren't found */

	for (i=0;i<nsds;i++) {
		found=0;
		for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
			csd = &ccissscsi[ctlr].dev[j];
			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
				csd->scsi3addr)) {
				if (sd[i].devtype == csd->devtype)
					found=2;	/* found device */
				else
					found=1; 	/* found a bug. */
				break;
			}
		}
		if (!found) {
			changes++;
			/* add fails when the table is full; stop trying */
			if (cciss_scsi_add_entry(ctlr, hostno,
				&sd[i].scsi3addr[0], sd[i].devtype) != 0)
				break;
		} else if (found == 1) {
			/* should never happen... pass 1 already reconciled
			   type changes */
			changes++;
			printk("cciss%d: device unexpectedly changed type\n",
				ctlr);
			/* but if it does happen, we just ignore that device */
		}
	}
	CPQ_TAPE_UNLOCK(ctlr, flags);

	if (!changes)
		printk("cciss%d: No device changes detected.\n", ctlr);

	return 0;
}
+
+static int
+lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
+{
+ int i;
+ struct cciss_scsi_dev_t *sd;
+ unsigned long flags;
+
+ CPQ_TAPE_LOCK(ctlr, flags);
+ for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
+ sd = &ccissscsi[ctlr].dev[i];
+ if (sd->bus == bus &&
+ sd->target == target &&
+ sd->lun == lun) {
+ memcpy(scsi3addr, &sd->scsi3addr[0], 8);
+ CPQ_TAPE_UNLOCK(ctlr, flags);
+ return 0;
+ }
+ }
+ CPQ_TAPE_UNLOCK(ctlr, flags);
+ return -1;
+}
+
+
static void
cciss_find_non_disk_devices(int cntl_num)
{
	/* One-time init scan: allocate the per-adapter SCSI state, ask the
	 * controller for its physical LUNs, and record any tape drives or
	 * medium changers in ccissscsi[cntl_num].  Disks are deliberately
	 * NOT exposed through the scsi layer. */
	ReportLunData_struct *ld_buff;
	InquiryData_struct *inq_buff;
	int return_code;
	int i;
	int listlength = 0;
	int num_luns;
	unsigned char scsi3addr[8];
	unsigned long flags;
	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;

	hba[cntl_num]->scsi_ctlr = (void *)
		kmalloc(sizeof(struct cciss_scsi_adapter_data_t),
			GFP_KERNEL);
	if (hba[cntl_num]->scsi_ctlr == NULL)
		return;

	((struct cciss_scsi_adapter_data_t *)
		hba[cntl_num]->scsi_ctlr)->scsi_host = NULL;
	((struct cciss_scsi_adapter_data_t *)
		hba[cntl_num]->scsi_ctlr)->lock = SPIN_LOCK_UNLOCKED;
	((struct cciss_scsi_adapter_data_t *)
		hba[cntl_num]->scsi_ctlr)->registered = 0;

	if (scsi_cmd_stack_setup(cntl_num) != 0) {
		/* NOTE(review): this path leaks the scsi_ctlr allocation
		 * above and leaves it non-NULL -- verify later users
		 * tolerate a half-initialized adapter before changing. */
		printk("Trouble, returned non-zero!\n");
		return;
	}

	ld_buff = kmalloc(reportlunsize, GFP_KERNEL);
	if (ld_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		return;
	}
	/* NOTE(review): only the fixed header is zeroed, not the whole
	 * reportlunsize buffer -- presumably fine since listlength bounds
	 * what is read back; confirm. */
	memset(ld_buff, 0, sizeof(ReportLunData_struct));
	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		kfree(ld_buff);
		return;
	}

	/* Get the physical luns */
	return_code = sendcmd(CISS_REPORT_PHYS, cntl_num, ld_buff,
			reportlunsize, 0, 0, 0, NULL );

	if( return_code == IO_OK) {
		/* byte count is big-endian on the wire */
		unsigned char *c = &ld_buff->LUNListLength[0];
		listlength = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3];
	}
	else {  /* getting report of physical luns failed */
		printk(KERN_WARNING "cciss: report physical luns"
			" command failed\n");
		listlength = 0;
	}

	CPQ_TAPE_LOCK(cntl_num, flags);
	ccissscsi[cntl_num].ndevices = 0;
	num_luns = listlength / 8; // 8 bytes per entry
	/* printk("Found %d LUNs\n", num_luns); */

	if (num_luns > CISS_MAX_PHYS_LUN)
	{
		printk(KERN_WARNING
			"cciss: Maximum physical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
			num_luns - CISS_MAX_PHYS_LUN);
		num_luns = CISS_MAX_PHYS_LUN;
	}

	for(i=0; i<num_luns; i++) {
		/* Execute an inquiry to figure the device type */
		memset(inq_buff, 0, sizeof(InquiryData_struct));
		memcpy(scsi3addr, ld_buff->LUN[i], 8); /* ugly... */
		return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
			sizeof(InquiryData_struct), 2, 0 ,0, scsi3addr );
		if (return_code == IO_OK) {
			/* 0xFF in byte 8 marks an unsupported/failed inquiry */
			if(inq_buff->data_byte[8] == 0xFF)
			{
				printk(KERN_WARNING "cciss: inquiry failed\n");
			} else {
				int devtype;

				/* printk("Inquiry...\n");
				print_bytes((unsigned char *) inq_buff, 36, 1, 1); */
				/* peripheral device type is the low 5 bits */
				devtype = (inq_buff->data_byte[0] & 0x1f);

				switch (devtype)
				{
				  case 0x01: /* sequential access, (tape) */
				  case 0x08: /* medium changer */
					  /* this is the only kind of dev */
					  /* we want to expose here. */
					  if (cciss_scsi_add_entry(cntl_num, -1,
						  (unsigned char *) ld_buff->LUN[i],
						  devtype) != 0)
						  i=num_luns; // leave loop
					  break;
				  default:
					  break;
				}

			}
		}
		else printk("cciss: inquiry failed.\n");
	}
#if 0
	for (i=0;i<ccissscsi[cntl_num].ndevices;i++)
		printk("Tape device presented at c%db%dt%dl%d\n",
			cntl_num, // <-- this is wrong
			ccissscsi[cntl_num].dev[i].bus,
			ccissscsi[cntl_num].dev[i].target,
			ccissscsi[cntl_num].dev[i].lun);
#endif
	CPQ_TAPE_UNLOCK(cntl_num, flags);
	kfree(ld_buff);
	kfree(inq_buff);
	return;
}
+
static void
complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
{
	/* Completion handler for SCSI passthru commands: unmap the DMA
	 * buffers, translate the controller's error info into cmd->result,
	 * hand the command back to the scsi mid layer, and return cp to
	 * the free stack.  'timeout' and 'tag' are unused here. */
	Scsi_Cmnd *cmd;
	ctlr_info_t *ctlr;
	u64bit addr64;
	ErrorInfo_struct *ei;

	ei = cp->err_info;

	/* First, see if it was a message rather than a command */
	if (cp->Request.Type.Type == TYPE_MSG) {
		cp->cmd_type = CMD_MSG_DONE;
		return;
	}

	/* we stored ptr to scsi cmd in the buffer head pointer */
	cmd = (Scsi_Cmnd *) cp->scsi_cmd;
	ctlr = hba[cp->ctlr];

	/* undo the DMA mappings */

	if (cmd->use_sg) {
		pci_unmap_sg(ctlr->pdev,
			cmd->buffer, cmd->use_sg,
			scsi_to_pci_dma_dir(cmd->sc_data_direction));
	}
	else if (cmd->request_bufflen) {
		/* single-buffer case: bus address was stashed in SG[0] */
		addr64.val32.lower = cp->SG[0].Addr.lower;
		addr64.val32.upper = cp->SG[0].Addr.upper;
		pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
			cmd->request_bufflen,
			scsi_to_pci_dma_dir(cmd->sc_data_direction));
	}

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	/* cmd->result |= (GOOD < 1); */	/* status byte */

	cmd->result |= (ei->ScsiStatus);
	/* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus); */

	/* copy the sense data whether we need to or not. */

	memcpy(cmd->sense_buffer, ei->SenseInfo,
		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
			SCSI_SENSE_BUFFERSIZE :
			ei->SenseLen);
	cmd->resid = ei->ResidualCnt;

	if(ei->CommandStatus != 0)
	{ /* an error has occurred */
		switch(ei->CommandStatus)
		{
			case CMD_TARGET_STATUS:
				/* Pass it up to the upper layers... */
				if( ei->ScsiStatus)
					/* NOTE(review): '< 1' here always ORs in
					 * 0 or 1; looks like a typo (perhaps for
					 * '<< 1', or redundant with the OR of
					 * ScsiStatus above) -- confirm against the
					 * 2.4 SCSI status-byte encoding before
					 * changing. */
					cmd->result |= (ei->ScsiStatus < 1);
				else {  /* scsi status is zero??? How??? */

	/* Ordinarily, this case should never happen, but there is a bug
	   in some released firmware revisions that allows it to happen
	   if, for example, a 4100 backplane loses power and the tape
	   drive is in it.  We assume that it's a fatal error of some
	   kind because we can't show that it wasn't.  We will make it
	   look like selection timeout since that is the most common
	   reason for this to occur, and it's severe enough. */

					cmd->result = DID_NO_CONNECT << 16;
				}
			break;
			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
			break;
			case CMD_DATA_OVERRUN:
				printk(KERN_WARNING "cciss: cp %p has"
					" completed with data overrun "
					"reported\n", cp);
			break;
			case CMD_INVALID: {
				/* print_bytes(cp, sizeof(*cp), 1, 0);
				print_cmd(cp); */
     /* We get CMD_INVALID if you address a non-existent tape drive instead
	of a selection timeout (no response).  You will see this if you yank
	out a tape drive, then try to access it.  This is kind of a shame
	because it means that any other CMD_INVALID (e.g. driver bug) will
	get interpreted as a missing target. */
				cmd->result = DID_NO_CONNECT << 16;
				}
			break;
			case CMD_PROTOCOL_ERR:
				printk(KERN_WARNING "cciss: cp %p has "
					"protocol error \n", cp);
			break;
			case CMD_HARDWARE_ERR:
				cmd->result = DID_ERROR << 16;
				printk(KERN_WARNING "cciss: cp %p had "
					" hardware error\n", cp);
			break;
			case CMD_CONNECTION_LOST:
				cmd->result = DID_ERROR << 16;
				printk(KERN_WARNING "cciss: cp %p had "
					"connection lost\n", cp);
			break;
			case CMD_ABORTED:
				cmd->result = DID_ABORT << 16;
				printk(KERN_WARNING "cciss: cp %p was "
					"aborted\n", cp);
			break;
			case CMD_ABORT_FAILED:
				cmd->result = DID_ERROR << 16;
				printk(KERN_WARNING "cciss: cp %p reports "
					"abort failed\n", cp);
			break;
			case CMD_UNSOLICITED_ABORT:
				cmd->result = DID_ABORT << 16;
				printk(KERN_WARNING "cciss: cp %p aborted "
					"do to an unsolicited abort\n", cp);
			break;
			case CMD_TIMEOUT:
				cmd->result = DID_TIME_OUT << 16;
				printk(KERN_WARNING "cciss: cp %p timedout\n",
					cp);
			break;
			default:
				cmd->result = DID_ERROR << 16;
				printk(KERN_WARNING "cciss: cp %p returned "
					"unknown status %x\n", cp,
						ei->CommandStatus);
		}
	}
	cmd->scsi_done(cmd);
	scsi_cmd_free(ctlr, cp);
}
+
/* cciss_scsi_detect is called from the scsi mid layer.
   The scsi mid layer (scsi_register_module) is
   called from cciss.c:cciss_init_one(). */

int __init
cciss_scsi_detect(Scsi_Host_Template *tpnt)
{
	/* Register one Scsi_Host for one adapter, then link the host and
	 * hba[] entry to each other.  Returns 1 (one adapter registered)
	 * or 0 on failure. */
	int i;
	struct Scsi_Host *sh;

	/* Tell the kernel we want to be a SCSI driver... */
	sh = scsi_register(tpnt, sizeof(struct ctlr_info *));
	if (sh == NULL) return 0;

	sh->io_port = 0;	// good enough? FIXME,
	sh->n_io_port = 0;	// I don't think we use these two...

	sh->this_id = SELF_SCSI_ID;

	/* Recover our controller number from the template name.
	 * NOTE(review): assumes the name is "cciss<N>" with the index
	 * starting at offset 5 -- set by cciss_register_scsi() in cciss.c;
	 * confirm if the naming ever changes. */
	i = simple_strtol((char *)&tpnt->name[5], NULL, 10);

	if (i<0 || i>=MAX_CTLR || hba[i] == NULL) {
		/* we didn't find ourself... we shouldn't get here. */
		printk("cciss_scsi_detect: could not find ourself in hba[]\n");
		return 0;
	}

	((struct cciss_scsi_adapter_data_t *)
		hba[i]->scsi_ctlr)->scsi_host = (void *) sh;
	sh->hostdata[0] = (unsigned long) hba[i];
	sh->irq = hba[i]->intr;
	sh->unique_id = sh->irq;
	scsi_set_pci_device(sh, hba[i]->pdev);

	return 1;	/* Say we have 1 scsi adapter, this will be */
			/* called multiple times, once for each adapter */
			/* from cciss.c:cciss_init_one().  We do it this */
			/* way for PCI-hot plug reasons. (we don't know how */
			/* many adapters we have total, so we say we have */
			/* 1, each of a unique type.) */
}
+
+static void __exit cleanup_cciss_module(void);
int
cciss_scsi_release(struct Scsi_Host *sh)
{
	/* Nothing to tear down here; all resources are released by the
	 * block-side remove path in cciss.c. */
	return 0;
}
+
+static void
+cciss_unmap_one(struct pci_dev *pdev,
+ CommandList_struct *cp,
+ size_t buflen,
+ int data_direction)
+{
+ u64bit addr64;
+
+ addr64.val32.lower = cp->SG[0].Addr.lower;
+ addr64.val32.upper = cp->SG[0].Addr.upper;
+ pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
+}
+
+static void
+cciss_map_one(struct pci_dev *pdev,
+ CommandList_struct *cp,
+ unsigned char *buf,
+ size_t buflen,
+ int data_direction)
+{
+ __u64 addr64;
+
+ addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+ cp->SG[0].Addr.lower =
+ (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[0].Addr.upper =
+ (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[0].Len = buflen;
+ cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+}
+
static int
cciss_scsi_do_simple_cmd(ctlr_info_t *c,
			CommandList_struct *cp,
			unsigned char *scsi3addr,
			unsigned char *cdb,
			unsigned char cdblen,
			unsigned char *buf, int bufsize,
			int direction)
{
	/* Synchronously run one CDB against the device at scsi3addr,
	 * DMA-mapping 'buf' for the transfer and sleeping until the
	 * interrupt handler completes the command.  Always returns 0;
	 * callers must inspect cp->err_info for the outcome. */
	unsigned long flags;
	DECLARE_COMPLETION(wait);

	cp->cmd_type = CMD_IOCTL_PEND;		// treat this like an ioctl
	cp->scsi_cmd = NULL;
	cp->Header.ReplyQueue = 0;  // unused in simple mode
	memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN));
	cp->Header.Tag.lower = cp->busaddr;  // Use k. address of cmd as tag
	// Fill in the request block...

	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */

	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
	memcpy(cp->Request.CDB, cdb, cdblen);
	cp->Request.Timeout = 0;	// No timeout
	cp->Request.CDBLen = cdblen;
	cp->Request.Type.Type = TYPE_CMD;
	cp->Request.Type.Attribute = ATTR_SIMPLE;
	cp->Request.Type.Direction = direction;

	/* Fill in the SG list and do dma mapping.
	 * NOTE(review): the mapping direction is hardwired to
	 * SCSI_DATA_READ even though 'direction' is a parameter -- all
	 * current callers pass XFER_READ, but confirm before adding a
	 * write-direction caller. */
	cciss_map_one(c->pdev, cp,
			(unsigned char *) buf, bufsize,
			scsi_to_pci_dma_dir(SCSI_DATA_READ));

	cp->waiting = &wait;

	/* Put the request on the tail of the request queue */
	spin_lock_irqsave(&io_request_lock, flags);
	addQ(&c->reqQ, cp);
	c->Qdepth++;
	start_io(c);
	spin_unlock_irqrestore(&io_request_lock, flags);

	wait_for_completion(&wait);

	/* undo the dma mapping */
	cciss_unmap_one(c->pdev, cp, bufsize,
			scsi_to_pci_dma_dir(SCSI_DATA_READ));

	return(0);
}
+
+static void
+cciss_scsi_interpret_error(CommandList_struct *cp)
+{
+ ErrorInfo_struct *ei;
+
+ ei = cp->err_info;
+ switch(ei->CommandStatus)
+ {
+ case CMD_TARGET_STATUS:
+ printk(KERN_WARNING "cciss: cmd %p has "
+ "completed with errors\n", cp);
+ printk(KERN_WARNING "cciss: cmd %p "
+ "has SCSI Status = %x\n",
+ cp,
+ ei->ScsiStatus);
+ if (ei->ScsiStatus == 0)
+ printk(KERN_WARNING
+ "cciss:SCSI status is abnormally zero. "
+ "(probably indicates selection timeout "
+ "reported incorrectly due to a known "
+ "firmware bug, circa July, 2001.)\n");
+ break;
+ case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ printk("UNDERRUN\n");
+ break;
+ case CMD_DATA_OVERRUN:
+ printk(KERN_WARNING "cciss: cp %p has"
+ " completed with data overrun "
+ "reported\n", cp);
+ break;
+ case CMD_INVALID: {
+ /* controller unfortunately reports SCSI passthru's */
+ /* to non-existent targets as invalid commands. */
+ printk(KERN_WARNING "cciss: cp %p is "
+ "reported invalid (probably means " "target device no longer present)\n",
+ cp);
+ /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
+ print_cmd(cp); */
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
+ printk(KERN_WARNING "cciss: cp %p has "
+ "protocol error \n", cp);
+ break;
+ case CMD_HARDWARE_ERR:
+ /* cmd->result = DID_ERROR << 16; */
+ printk(KERN_WARNING "cciss: cp %p had "
+ " hardware error\n", cp);
+ break;
+ case CMD_CONNECTION_LOST:
+ printk(KERN_WARNING "cciss: cp %p had "
+ "connection lost\n", cp);
+ break;
+ case CMD_ABORTED:
+ printk(KERN_WARNING "cciss: cp %p was "
+ "aborted\n", cp);
+ break;
+ case CMD_ABORT_FAILED:
+ printk(KERN_WARNING "cciss: cp %p reports "
+ "abort failed\n", cp);
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ printk(KERN_WARNING "cciss: cp %p aborted "
+ "do to an unsolicited abort\n", cp);
+ break;
+ case CMD_TIMEOUT:
+ printk(KERN_WARNING "cciss: cp %p timedout\n",
+ cp);
+ break;
+ default:
+ printk(KERN_WARNING "cciss: cp %p returned "
+ "unknown status %x\n", cp,
+ ei->CommandStatus);
+ }
+}
+
+static int
+cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
+ InquiryData_struct *buf)
+{
+ int rc;
+ CommandList_struct *cp;
+ char cdb[6];
+ ErrorInfo_struct *ei;
+
+ cp = scsi_cmd_alloc(c);
+ ei = cp->err_info;
+
+ if (cp == NULL) { /* trouble... */
+ printk("cmd_alloc returned NULL!\n");
+ return -1;
+ }
+
+ cdb[0] = CISS_INQUIRY;
+ cdb[1] = 0;
+ cdb[2] = 0;
+ cdb[3] = 0;
+ cdb[4] = sizeof(*buf) & 0xff;
+ cdb[5] = 0;
+ rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb,
+ 6, (unsigned char *) buf,
+ sizeof(*buf), XFER_READ);
+
+ if (rc != 0) return rc; /* something went wrong */
+
+ if (ei->CommandStatus != 0 &&
+ ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ cciss_scsi_interpret_error(cp);
+ scsi_cmd_free(c, cp);
+ return -1;
+ }
+ scsi_cmd_free(c, cp);
+ return 0;
+}
+
+static int
+cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
+ ReportLunData_struct *buf, int bufsize)
+{
+ int rc;
+ CommandList_struct *cp;
+ unsigned char cdb[12];
+ unsigned char scsi3addr[8];
+ ErrorInfo_struct *ei;
+
+ cp = scsi_cmd_alloc(c);
+ if (cp == NULL) { /* trouble... */
+ printk("cmd_alloc returned NULL!\n");
+ return -1;
+ }
+
+ memset(&scsi3addr[0], 0, 8); /* address the controller */
+ cdb[0] = CISS_REPORT_PHYS;
+ cdb[1] = 0;
+ cdb[2] = 0;
+ cdb[3] = 0;
+ cdb[4] = 0;
+ cdb[5] = 0;
+ cdb[6] = (bufsize >> 24) & 0xFF; //MSB
+ cdb[7] = (bufsize >> 16) & 0xFF;
+ cdb[8] = (bufsize >> 8) & 0xFF;
+ cdb[9] = bufsize & 0xFF;
+ cdb[10] = 0;
+ cdb[11] = 0;
+
+ rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr,
+ cdb, 12,
+ (unsigned char *) buf,
+ bufsize, XFER_READ);
+
+ if (rc != 0) return rc; /* something went wrong */
+
+ ei = cp->err_info;
+ if (ei->CommandStatus != 0 &&
+ ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ cciss_scsi_interpret_error(cp);
+ scsi_cmd_free(c, cp);
+ return -1;
+ }
+ scsi_cmd_free(c, cp);
+ return 0;
+}
+
+static void
+cciss_update_non_disk_devices(int cntl_num, int hostno)
+{
+ /* the idea here is we could get notified from /proc
+ that some devices have changed, so we do a report
+ physical luns cmd, and adjust our list of devices
+ accordingly. (We can't rely on the scsi-mid layer just
+ doing inquiries, because the "busses" that the scsi
+ mid-layer probes are totally fabricated by this driver,
+ so new devices wouldn't show up.
+
+ the scsi3addr's of devices won't change so long as the
+ adapter is not reset. That means we can rescan and
+ tell which devices we already know about, vs. new
+ devices, vs. disappearing devices.
+
+ Also, if you yank out a tape drive, then put in a disk
+ in it's place, (say, a configured volume from another
+ array controller for instance) _don't_ poke this driver
+ (so it thinks it's still a tape, but _do_ poke the scsi
+ mid layer, so it does an inquiry... the scsi mid layer
+ could see the physical disk. This would be bad. Need to
+ think about how to prevent that. One idea would be to
+ snoop all scsi responses and if an inquiry repsonse comes
+ back that reports a disk, chuck it an return selection
+ timeout instead and adjust our table... Not sure i like
+ that though.
+
+ */
+
+ ReportLunData_struct *ld_buff;
+ InquiryData_struct *inq_buff;
+ unsigned char scsi3addr[8];
+ ctlr_info_t *c;
+ __u32 num_luns=0;
+ unsigned char *ch;
+ /* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */
+ struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
+ int ncurrent=0;
+ int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
+ int i;
+
+ c = (ctlr_info_t *) hba[cntl_num];
+ ld_buff = kmalloc(reportlunsize, GFP_KERNEL);
+ if (ld_buff == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ return;
+ }
+ memset(ld_buff, 0, reportlunsize);
+ inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
+ if (inq_buff == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ kfree(ld_buff);
+ return;
+ }
+
+ if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
+ ch = &ld_buff->LUNListLength[0];
+ num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
+ if (num_luns > CISS_MAX_PHYS_LUN) {
+ printk(KERN_WARNING
+ "cciss: Maximum physical LUNs (%d) exceeded. "
+ "%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
+ num_luns - CISS_MAX_PHYS_LUN);
+ num_luns = CISS_MAX_PHYS_LUN;
+ }
+ }
+ else {
+ printk(KERN_ERR "cciss: Report physical LUNs failed.\n");
+ return;
+ }
+
+
+ /* adjust our table of devices */
+ for(i=0; i<num_luns; i++)
+ {
+ int devtype;
+
+ /* for each physical lun, do an inquiry */
+ memset(inq_buff, 0, sizeof(InquiryData_struct));
+ memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
+
+ if (cciss_scsi_do_inquiry(hba[cntl_num],
+ scsi3addr, inq_buff) != 0)
+ {
+ /* Inquiry failed (msg printed already) */
+ devtype = 0; /* so we will skip this device. */
+ } else /* what kind of device is this? */
+ devtype = (inq_buff->data_byte[0] & 0x1f);
+
+ switch (devtype)
+ {
+ case 0x01: /* sequential access, (tape) */
+ case 0x08: /* medium changer */
+ if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
+ printk(KERN_INFO "cciss%d: %s ignored, "
+ "too many devices.\n", cntl_num,
+ DEVICETYPE(devtype));
+ break;
+ }
+ memcpy(¤tsd[ncurrent].scsi3addr[0],
+ &scsi3addr[0], 8);
+ currentsd[ncurrent].devtype = devtype;
+ currentsd[ncurrent].bus = -1;
+ currentsd[ncurrent].target = -1;
+ currentsd[ncurrent].lun = -1;
+ ncurrent++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent);
+
+ kfree(inq_buff);
+ kfree(ld_buff);
+ return;
+}
+
/* Return the length of 'verb' if the buffer 'ptr' (of length 'len')
   begins with it, otherwise 0.  (Thanks to ncr53c8xx.c) */
static int
is_keyword(char *ptr, int len, char *verb)
{
	int vlen = strlen(verb);

	if (len < vlen)
		return 0;
	if (memcmp(verb, ptr, vlen) != 0)
		return 0;
	return vlen;
}
+
+static int
+cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length)
+{
+ int arg_len;
+
+ if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
+ cciss_update_non_disk_devices(ctlr, hostno);
+ else
+ return -EINVAL;
+ return length;
+}
+
+/* It's a pity that we need this, but, we do... */
+extern struct Scsi_Host *scsi_hostlist; /* from ../scsi/hosts.c */
+
/* /proc/scsi entry point: func == 0 is a read (we emit our status text
   into 'buffer' and honor the offset/*start protocol of the 2.4 proc_info
   interface), func == 1 is a write (user commands such as "rescan"). */
int
cciss_scsi_proc_info(char *buffer, /* data buffer */
	char **start, /* where data in buffer starts */
	off_t offset, /* offset from start of imaginary file */
	int length, /* length of data in buffer */
	int hostnum, /* which host adapter (always zero for me) */
	int func) /* 0 == read, 1 == write */
{

	int buflen, datalen;
	struct Scsi_Host *sh;
	int found;
	ctlr_info_t *ci;
	int cntl_num;

	/* Lets see if we can find our Scsi_Host...
	   this might be kind of "bad", searching scis_hostlist this way
	   but how else can we find the scsi host? I think I've seen
	   this coded both ways, (circular list and null terminated list)
	   I coded it to work either way, since I wasn't sure. */

	sh = scsi_hostlist;
	found=0;
	do {
		if (sh == NULL) break;
		if (sh->host_no == hostnum) {
			found++;
			break;
		}
		sh = sh->next;
	} while (sh != scsi_hostlist && sh != NULL);

	if (sh == NULL || found == 0) /* This really shouldn't ever happen. */
		return -EINVAL;

	/* hostdata[0] was stashed with our ctlr_info_t at detect time */
	ci = (ctlr_info_t *) sh->hostdata[0];
	if (ci == NULL) /* This really shouldn't ever happen. */
		return -EINVAL;

	cntl_num = ci->ctlr; /* Get our index into the hba[] array */

	if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */
		buflen = sprintf(buffer, "hostnum=%d\n", hostnum);

		/* datalen is how much of what we produced lies at or
		   beyond the caller's offset */
		datalen = buflen - offset;
		if (datalen < 0) { /* they're reading past EOF. */
			datalen = 0;
			*start = buffer+buflen;
		} else
			*start = buffer + offset;
		return(datalen);
	} else /* User is writing to /proc/scsi/cciss*?/?* ... */
		return cciss_scsi_user_command(cntl_num, hostnum,
			buffer, length);
}
+
+/* this is via the generic proc support */
+const char *
+cciss_scsi_info(struct Scsi_Host *sa)
+{
+ static char buf[300];
+ ctlr_info_t *ci;
+
+ /* probably need to work on putting a bit more info in here... */
+ /* this is output via the /proc filesystem. */
+
+ ci = (ctlr_info_t *) sa->hostdata[0];
+
+ sprintf(buf, "%s %c%c%c%c\n",
+ ci->product_name,
+ ci->firm_ver[0],
+ ci->firm_ver[1],
+ ci->firm_ver[2],
+ ci->firm_ver[3]);
+
+ return buf;
+}
+
+
/* cciss_scatter_gather takes a Scsi_Cmnd, (cmd), and does the pci
   dma mapping and fills in the scatter gather entries of the
   cciss command, cp. */

static void
cciss_scatter_gather(struct pci_dev *pdev,
	CommandList_struct *cp,
	Scsi_Cmnd *cmd)
{
	unsigned int use_sg, nsegs=0, len;
	/* when cmd->use_sg != 0, cmd->buffer holds the scatterlist */
	struct scatterlist *scatter = (struct scatterlist *) cmd->buffer;
	__u64 addr64;

	/* is it just one virtual address? */
	if (!cmd->use_sg) {
		if (cmd->request_bufflen) { /* anything to xfer? */

			/* map the single buffer; the controller takes
			   64-bit bus addresses split into two 32-bit
			   halves */
			addr64 = (__u64) pci_map_single(pdev,
				cmd->request_buffer,
				cmd->request_bufflen,
				scsi_to_pci_dma_dir(cmd->sc_data_direction));

			cp->SG[0].Addr.lower =
				(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
			cp->SG[0].Addr.upper =
				(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
			cp->SG[0].Len = cmd->request_bufflen;
			nsegs=1;
		}
	} /* else, must be a list of virtual addresses.... */
	else if (cmd->use_sg <= MAXSGENTRIES) { /* not too many addrs? */

		/* pci_map_sg may coalesce entries, so iterate over the
		   count it returns, not cmd->use_sg */
		use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg,
			scsi_to_pci_dma_dir(cmd->sc_data_direction));

		for (nsegs=0; nsegs < use_sg; nsegs++) {
			addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
			len = sg_dma_len(&scatter[nsegs]);
			cp->SG[nsegs].Addr.lower =
				(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
			cp->SG[nsegs].Addr.upper =
				(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
			cp->SG[nsegs].Len = len;
			cp->SG[nsegs].Ext = 0; // we are not chaining
		}
	} else BUG(); /* mid layer must respect our sg_tablesize (MAXSGENTRIES) */

	cp->Header.SGList = (__u8) nsegs; /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (__u16) nsegs; /* total sgs in this cmd list */
	return;
}
+
+
/* SCSI mid-layer queuecommand entry point: translate the mid layer's
   (channel,target,lun) nexus into a controller physical LUN address,
   build a CISS command for it, and queue it to the controller.
   'done' is called from the interrupt path when the command completes. */
int
cciss_scsi_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
{
	ctlr_info_t **c;
	int ctlr, rc;
	unsigned char scsi3addr[8];
	CommandList_struct *cp;

	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
	// We violate cmd->host privacy here. (Is there another way?)
	c = (ctlr_info_t **) &cmd->host->hostdata[0];
	ctlr = (*c)->ctlr;

	rc = lookup_scsi3addr(ctlr, cmd->channel, cmd->target, cmd->lun,
		scsi3addr);
	if (rc != 0) {
		/* the scsi nexus does not match any that we presented... */
		/* pretend to mid layer that we got selection timeout */
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		/* we might want to think about registering controller itself
		   as a processor device on the bus so sg binds to it. */
		return 0;
	}

	// printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n",
	// cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);

	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
	   see what the device thinks of it. */

	cp = scsi_cmd_alloc(*c);
	if (cp == NULL) { /* trouble... */
		printk("scsi_cmd_alloc returned NULL!\n");
		/* FIXME: next 3 lines are -> BAD! <- */
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}

	// Fill in the command list header

	cmd->scsi_done = done; // save this for use by completion code

	// save cp in case we have to abort it
	cmd->host_scribble = (unsigned char *) cp;

	cp->cmd_type = CMD_SCSI;
	cp->scsi_cmd = cmd;
	cp->Header.ReplyQueue = 0; // unused in simple mode
	memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag

	// Fill in the request block...

	cp->Request.Timeout = 0; // No timeout
	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
	if (cmd->cmd_len > sizeof(cp->Request.CDB)) BUG();
	cp->Request.CDBLen = cmd->cmd_len;
	memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len);
	cp->Request.Type.Type = TYPE_CMD;
	cp->Request.Type.Attribute = ATTR_SIMPLE;
	/* map the mid layer's transfer direction onto the CISS one */
	switch(cmd->sc_data_direction)
	{
	case SCSI_DATA_WRITE: cp->Request.Type.Direction = XFER_WRITE; break;
	case SCSI_DATA_READ: cp->Request.Type.Direction = XFER_READ; break;
	case SCSI_DATA_NONE: cp->Request.Type.Direction = XFER_NONE; break;

	case SCSI_DATA_UNKNOWN:
		// This can happen if a buggy application does a scsi passthru
		// and sets both inlen and outlen to non-zero. ( see
		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )

		cp->Request.Type.Direction = XFER_RSVD;
		// This is technically wrong, and cciss controllers should
		// reject it with CMD_INVALID, which is the most correct
		// response, but non-fibre backends appear to let it
		// slide by, and give the same results as if this field
		// were set correctly. Either way is acceptable for
		// our purposes here.

		break;

	default:
		printk("cciss: unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list

	/* Put the request on the tail of the request queue */

	addQ(&(*c)->reqQ, cp);
	(*c)->Qdepth++;
	start_io(*c);

	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}
+
+static void
+init_driver_template(int ctlr)
+{
+ memset(&driver_template[ctlr], 0, sizeof(driver_template[ctlr]));
+ driver_template[ctlr].name = ccissscsi[ctlr].name;
+ driver_template[ctlr].proc_name = ccissscsi[ctlr].name;
+ driver_template[ctlr].detect = cciss_scsi_detect;
+ driver_template[ctlr].release = cciss_scsi_release;
+ driver_template[ctlr].proc_info = cciss_scsi_proc_info;
+ driver_template[ctlr].queuecommand = cciss_scsi_queue_command;
+ driver_template[ctlr].eh_abort_handler = NULL;
+ driver_template[ctlr].eh_device_reset_handler = NULL;
+ driver_template[ctlr].bios_param = scsicam_bios_param;
+ driver_template[ctlr].can_queue = SCSI_CCISS_CAN_QUEUE;
+ driver_template[ctlr].this_id = SELF_SCSI_ID;
+ driver_template[ctlr].sg_tablesize = MAXSGENTRIES;
+ driver_template[ctlr].cmd_per_lun = 1;
+ driver_template[ctlr].use_new_eh_code = 1;
+ driver_template[ctlr].use_clustering = DISABLE_CLUSTERING;
+ driver_template[ctlr].module = THIS_MODULE;
+
+ /* set scsi_host to NULL so our detect routine will
+ find us on register */
+
+ ((struct cciss_scsi_adapter_data_t *)
+ hba[ctlr]->scsi_ctlr)->scsi_host = NULL;
+
+}
+
+static void
+cciss_unregister_scsi(int ctlr)
+{
+ struct cciss_scsi_adapter_data_t *sa;
+ struct cciss_scsi_cmd_stack_t *stk;
+ unsigned long flags;
+
+ /* we are being forcibly unloaded, and may not refuse. */
+
+ spin_lock_irqsave(&io_request_lock, flags);
+ sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+ stk = &sa->cmd_stack;
+
+ /* if we weren't ever actually registered, don't unregister */
+ if (((struct cciss_scsi_adapter_data_t *)
+ hba[ctlr]->scsi_ctlr)->registered) {
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ scsi_unregister_module(MODULE_SCSI_HA, &driver_template[ctlr]);
+ spin_lock_irqsave(&io_request_lock, flags);
+ }
+ init_driver_template(ctlr);
+ scsi_cmd_stack_free(ctlr);
+ kfree(hba[ctlr]->scsi_ctlr);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+static int
+cciss_register_scsi(int ctlr, int this_is_init_time)
+{
+ unsigned long flags;
+
+ CPQ_TAPE_LOCK(ctlr, flags);
+
+ sprintf( ccissscsi[ctlr].name, "cciss%d", ctlr );
+
+ init_driver_template(ctlr);
+
+ /* Since this is really a block driver, the SCSI core may not be
+ initialized yet, in which case, calling scsi_register_module
+ would hang. instead, we will do it later, via /proc filesystem
+ and rc scripts, when we know SCSI core is good to go. */
+
+ if (this_is_init_time) {
+ CPQ_TAPE_UNLOCK(ctlr, flags);
+ return 0;
+ }
+
+ /* Only register if SCSI devices are detected. */
+ if (ccissscsi[ctlr].ndevices != 0) {
+ ((struct cciss_scsi_adapter_data_t *)
+ hba[ctlr]->scsi_ctlr)->registered = 1;
+ CPQ_TAPE_UNLOCK(ctlr, flags);
+ return scsi_register_module(MODULE_SCSI_HA,
+ &driver_template[ctlr]);
+ }
+ CPQ_TAPE_UNLOCK(ctlr, flags);
+ printk(KERN_INFO
+ "cciss%d: No appropriate SCSI device detected, "
+ "SCSI subsystem not engaged.\n", ctlr);
+ return 0;
+}
+
+static int
+cciss_engage_scsi(int ctlr)
+{
+ struct cciss_scsi_adapter_data_t *sa;
+ struct cciss_scsi_cmd_stack_t *stk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&io_request_lock, flags);
+ sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+ stk = &sa->cmd_stack;
+
+ if (((struct cciss_scsi_adapter_data_t *)
+ hba[ctlr]->scsi_ctlr)->registered) {
+ printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return ENXIO;
+ }
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ cciss_update_non_disk_devices(ctlr, -1);
+ cciss_register_scsi(ctlr, 0);
+ return 0;
+}
+
+static void
+cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
+{
+ int size;
+ unsigned int flags;
+
+ *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline
+
+ CPQ_TAPE_LOCK(ctlr, flags);
+ size = sprintf(buffer + *len,
+ "Sequential access devices: %d\n\n",
+ ccissscsi[ctlr].ndevices);
+ CPQ_TAPE_UNLOCK(ctlr, flags);
+ *pos += size; *len += size;
+}
+
#else /* no CONFIG_CISS_SCSI_TAPE */

/* If no tape support, then these become defined out of existence */
/* (the macros expand to nothing, so call sites compile away entirely) */

#define cciss_find_non_disk_devices(cntl_num)
#define cciss_unregister_scsi(ctlr)
#define cciss_register_scsi(ctlr, this_is_init_time)
#define cciss_proc_tape_report(ctlr, buffer, pos, len)

#endif /* CONFIG_CISS_SCSI_TAPE */